diff --git a/.circleci/config.yml b/.circleci/config.yml index 5f3707c0ada7f..758d62b53e956 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -23,9 +23,6 @@ parameters: reproducibility_dispatch: type: boolean default: false - diff_asterisc_bytecode_dispatch: - type: boolean - default: false kontrol_dispatch: type: boolean default: false @@ -567,48 +564,6 @@ jobs: - notify-failures-on-develop: mentions: "@proofs-team" - diff-asterisc-bytecode: - docker: - - image: <> - resource_class: xlarge - steps: - - utils/checkout-with-mise: - checkout-method: blobless - enable-mise-cache: true - - run: - name: Check `RISCV.sol` bytecode - working_directory: packages/contracts-bedrock - command: | - # Clone asterisc @ the pinned version to fetch remote `RISCV.sol` - ASTERISC_REV="v$(yq '.tools.asterisc' ../../mise.toml)" - REMOTE_ASTERISC_PATH="./src/vendor/asterisc/RISCV_Remote.sol" - git clone https://github.com/ethereum-optimism/asterisc \ - -b $ASTERISC_REV && \ - cp ./asterisc/rvsol/src/RISCV.sol $REMOTE_ASTERISC_PATH - - # Replace import paths - sed -i -e 's/@optimism\///' $REMOTE_ASTERISC_PATH - # Replace legacy interface paths - sed -i -e 's/src\/cannon\/interfaces\//interfaces\/cannon\//g' $REMOTE_ASTERISC_PATH - sed -i -e 's/src\/dispute\/interfaces\//interfaces\/dispute\//g' $REMOTE_ASTERISC_PATH - # Replace contract name - sed -i -e 's/contract RISCV/contract RISCV_Remote/' $REMOTE_ASTERISC_PATH - - # Install deps - forge install - - # Diff bytecode, with both contracts compiled in the local environment. - REMOTE_ASTERISC_CODE="$(forge inspect RISCV_Remote bytecode | tr -d '\n')" - LOCAL_ASTERISC_CODE="$(forge inspect RISCV bytecode | tr -d '\n')" - if [ "$REMOTE_ASTERISC_CODE" != "$LOCAL_ASTERISC_CODE" ]; then - echo "Asterisc bytecode mismatch. Local version does not match remote. Diff:" - diff <(echo "$REMOTE_ASTERISC_CODE") <(echo "$LOCAL_ASTERISC_CODE") - else - echo "Asterisc version up to date." 
- fi - - notify-failures-on-develop: - mentions: "@clabby @proofs-team" - contracts-bedrock-build: docker: - image: <> @@ -960,6 +915,7 @@ jobs: working_directory: packages/contracts-bedrock - check-changed: patterns: <> + - install-solc-compilers - run: name: Print dependencies command: just dep-status @@ -1147,6 +1103,8 @@ jobs: command: | just print-pinned-block-number > ./pinnedBlockNumber.txt cat pinnedBlockNumber.txt + environment: + ETH_RPC_URL: https://ci-mainnet-l1-archive.optimism.io working_directory: packages/contracts-bedrock - restore_cache: name: Restore forked state @@ -1239,6 +1197,7 @@ jobs: cat pinnedBlockNumber.txt environment: FORK_BASE_CHAIN: <> + ETH_RPC_URL: <> working_directory: packages/contracts-bedrock - restore_cache: name: Restore forked state @@ -1296,7 +1255,7 @@ jobs: contracts-bedrock-upload: machine: true - resource_class: ethereum-optimism/latitude-1 + resource_class: large steps: - utils/checkout-with-mise: checkout-method: blobless @@ -1373,7 +1332,7 @@ jobs: command: forge --version - run: name: Pull cached artifacts - command: bash scripts/ops/pull-artifacts.sh --fallback-to-latest + command: bash scripts/ops/pull-artifacts.sh working_directory: packages/contracts-bedrock - run: name: Run checks @@ -2780,10 +2739,9 @@ workflows: parameters: features: &features_matrix - main + - CUSTOM_GAS_TOKEN - OPTIMISM_PORTAL_INTEROP - - CANNON_KONA,DEPLOY_V2_DISPUTE_GAMES - OPCM_V2 - - CUSTOM_GAS_TOKEN - OPCM_V2,CUSTOM_GAS_TOKEN - OPCM_V2,OPTIMISM_PORTAL_INTEROP context: @@ -2851,9 +2809,6 @@ workflows: - op-deployer-forge-version: context: - circleci-repo-readonly-authenticated-github-token - - diff-asterisc-bytecode: - context: - - circleci-repo-readonly-authenticated-github-token - semgrep-scan: name: semgrep-scan-local scan_command: semgrep scan --timeout=100 --config .semgrep/rules/ --error . 
diff --git a/.cursor/rules/solidity-styles.mdc b/.cursor/rules/solidity-styles.mdc index f3292277ecb8e..d7f4dddcafe6b 100644 --- a/.cursor/rules/solidity-styles.mdc +++ b/.cursor/rules/solidity-styles.mdc @@ -12,7 +12,8 @@ Applies to Solidity files. - NatSpec documentation comments must use the triple-slash `///` style - Use `//` for regular inline comments that are not NatSpec -- Always use `@notice` instead of `@dev` +- Use `@notice` for documenting what a function/contract does (external-facing documentation) +- Use `@dev` for internal developer notes, reminders, or invariants (e.g., "when updating this, also update X") - Use a line-length of 100 characters - Custom tags: - `@custom:proxied`: Add to a contract whenever it's meant to live behind a proxy diff --git a/.gitmodules b/.gitmodules index c501b82eebcfd..e4c4c3b2887f8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -38,10 +38,10 @@ branch = dev [submodule "op-rbuilder"] path = op-rbuilder - url = git@github.com:flashbots/op-rbuilder.git + url = https://github.com/flashbots/op-rbuilder [submodule "rollup-boost"] path = rollup-boost - url = git@github.com:flashbots/rollup-boost.git + url = https://github.com/flashbots/rollup-boost [submodule "kona"] path = kona - url = git@github.com:op-rs/kona.git + url = https://github.com/op-rs/kona diff --git a/.semgrep/rules/sol-rules.yaml b/.semgrep/rules/sol-rules.yaml index ed1bb708a3dd7..ef272fb6fe126 100644 --- a/.semgrep/rules/sol-rules.yaml +++ b/.semgrep/rules/sol-rules.yaml @@ -324,6 +324,7 @@ rules: - packages/contracts-bedrock/src/L1/opcm/OPContractsManagerContainer.sol - packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol - packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtilsCaller.sol + - packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol - packages/contracts-bedrock/src/L1/OptimismPortal2.sol - packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol - packages/contracts-bedrock/src/L2/FeeVault.sol @@ -398,3 
+399,14 @@ rules: paths: include: - packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol + + - id: sol-style-ban-forge-std-test-import + languages: [solidity] + severity: ERROR + message: Import Test from test/setup/Test.sol, not forge-std/Test.sol. Import other forge-std components (stdStorage, StdStorage, stdError, StdUtils, Vm, console2, etc.) from their specific files (forge-std/StdStorage.sol, forge-std/StdError.sol, forge-std/StdUtils.sol, forge-std/Vm.sol, forge-std/console2.sol, etc.) + pattern-regex: import\s+(\{[^}]*\}\s+from\s+)?"forge-std/Test\.sol"\s*; + paths: + include: + - packages/contracts-bedrock/test + exclude: + - packages/contracts-bedrock/test/setup/Test.sol diff --git a/.semgrep/tests/sol-rules.sol-style-ban-forge-std-test-import.t.sol b/.semgrep/tests/sol-rules.sol-style-ban-forge-std-test-import.t.sol new file mode 100644 index 0000000000000..58f6f19357ecd --- /dev/null +++ b/.semgrep/tests/sol-rules.sol-style-ban-forge-std-test-import.t.sol @@ -0,0 +1,20 @@ +// ruleid: sol-style-ban-forge-std-test-import +import { Test } from "forge-std/Test.sol"; + +// ruleid: sol-style-ban-forge-std-test-import +import { Test as ForgeTest } from "forge-std/Test.sol"; + +// ruleid: sol-style-ban-forge-std-test-import +import { Test, Vm } from "forge-std/Test.sol"; + +// ok: sol-style-ban-forge-std-test-import +import { Test } from "test/setup/Test.sol"; + +// ok: sol-style-ban-forge-std-test-import +import { Test as BaseTest } from "test/setup/Test.sol"; + +// ok: sol-style-ban-forge-std-test-import +import { Vm } from "forge-std/Vm.sol"; + +// ok: sol-style-ban-forge-std-test-import +import { StdUtils } from "forge-std/StdUtils.sol"; diff --git a/cannon/mipsevm/iface.go b/cannon/mipsevm/iface.go index f12197a617639..ed65960e570be 100644 --- a/cannon/mipsevm/iface.go +++ b/cannon/mipsevm/iface.go @@ -74,7 +74,6 @@ type Metadata interface { // Toggles here are temporary and should be removed once the newer state version is deployed widely. 
The older // version can then be supported via multicannon pulling in a specific build and support for it dropped in latest code. type FeatureToggles struct { - SupportWorkingSysGetRandom bool } type FPVM interface { diff --git a/cannon/mipsevm/multithreaded/mips.go b/cannon/mipsevm/multithreaded/mips.go index 31cd20546b076..c57c8655cd5a2 100644 --- a/cannon/mipsevm/multithreaded/mips.go +++ b/cannon/mipsevm/multithreaded/mips.go @@ -159,10 +159,7 @@ func (m *InstrumentedState) handleSyscall() error { v0 = 0 v1 = 0 case arch.SysGetRandom: - if m.features.SupportWorkingSysGetRandom { - v0, v1 = m.syscallGetRandom(a0, a1) - } - // Otherwise, ignored (noop) + v0, v1 = m.syscallGetRandom(a0, a1) case arch.SysMunmap: case arch.SysMprotect: case arch.SysGetAffinity: diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index c5e75cc4937a2..120ce903dacff 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -21,7 +21,6 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" "github.com/ethereum-optimism/optimism/cannon/mipsevm/register" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" ) type insnCache interface { @@ -632,21 +631,6 @@ func TestEVM_MMap(t *testing.T) { Run(t, cases) } -func TestEVM_SysGetRandom_isImplemented(t *testing.T) { - t.Parallel() - // Assert we have at least one vm with the working getrandom syscall - foundVmWithSyscallEnabled := false - for _, vers := range GetMipsVersionTestCases(t) { - features := versions.FeaturesForVersion(vers.Version) - foundVmWithSyscallEnabled = foundVmWithSyscallEnabled || features.SupportWorkingSysGetRandom - } - require.True(t, foundVmWithSyscallEnabled) - - // Assert that latest version has a working getrandom ssycall - latestFeatures := versions.FeaturesForVersion(versions.GetExperimentalVersion()) - require.True(t, 
latestFeatures.SupportWorkingSysGetRandom) -} - func TestEVM_SysGetRandom(t *testing.T) { t.Parallel() @@ -702,18 +686,12 @@ func TestEVM_SysGetRandom(t *testing.T) { } setExpectations := func(t require.TestingT, testCase testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { - isNoop := !versions.FeaturesForVersion(vm.Version).SupportWorkingSysGetRandom expectedMemory := testCase.expectedRandDataMask&randomData | ^testCase.expectedRandDataMask&startingMemory expected.ExpectStep() - if isNoop { - expected.ActiveThread().Registers[register.RegSyscallRet1] = 0 - expected.ActiveThread().Registers[register.RegSyscallErrno] = 0 - } else { - expected.ActiveThread().Registers[register.RegSyscallRet1] = testCase.expectedReturnValue - expected.ActiveThread().Registers[register.RegSyscallErrno] = 0 - expected.ExpectMemoryWrite(effAddr, expectedMemory) - } + expected.ActiveThread().Registers[register.RegSyscallRet1] = testCase.expectedReturnValue + expected.ActiveThread().Registers[register.RegSyscallErrno] = 0 + expected.ExpectMemoryWrite(effAddr, expectedMemory) return ExpectNormalExecution() } @@ -985,10 +963,6 @@ func TestEVM_RandomProgram(t *testing.T) { t.Run(v.Name, func(t *testing.T) { t.Parallel() - if !versions.FeaturesForVersion(v.Version).SupportWorkingSysGetRandom { - t.Skip("Skipping vm version that does not support working sys_getrandom") - } - validator := testutil.NewEvmValidator(t, v.StateHashFn, v.Contracts) var stdOutBuf, stdErrBuf bytes.Buffer diff --git a/cannon/mipsevm/tests/evm_multithreaded64_test.go b/cannon/mipsevm/tests/evm_multithreaded64_test.go index 19036552cef9a..ba96f0dd9439f 100644 --- a/cannon/mipsevm/tests/evm_multithreaded64_test.go +++ b/cannon/mipsevm/tests/evm_multithreaded64_test.go @@ -16,7 +16,6 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" mtutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" 
"github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" ) func TestEVM_MT64_LL(t *testing.T) { @@ -608,7 +607,6 @@ var NoopSyscalls64 = map[string]uint32{ "SysPipe2": 5287, "SysEpollCtl": 5208, "SysEpollPwait": 5272, - "SysGetRandom": 5313, "SysUname": 5061, //"SysStat64": UndefinedSysNr, "SysGetuid": 5100, @@ -624,49 +622,46 @@ var NoopSyscalls64 = map[string]uint32{ "SysTimerDelete": 5220, } -func getNoopSyscalls64(vmVersion versions.StateVersion) map[string]uint32 { - noOpCalls := maps.Clone(NoopSyscalls64) - features := versions.FeaturesForVersion(vmVersion) - if features.SupportWorkingSysGetRandom { - delete(noOpCalls, "SysGetRandom") - } - return noOpCalls -} - -func getSupportedSyscalls(vmVersion versions.StateVersion) []uint32 { - supportedSyscalls := []uint32{arch.SysMmap, arch.SysBrk, arch.SysClone, arch.SysExitGroup, arch.SysRead, arch.SysWrite, arch.SysFcntl, arch.SysExit, arch.SysSchedYield, arch.SysGetTID, arch.SysFutex, arch.SysOpen, arch.SysNanosleep, arch.SysClockGetTime, arch.SysGetpid, arch.SysEventFd2} - - features := versions.FeaturesForVersion(vmVersion) - if features.SupportWorkingSysGetRandom { - supportedSyscalls = append(supportedSyscalls, arch.SysGetRandom) - } - return supportedSyscalls +var SupportedSyscalls64 = []uint32{ + arch.SysMmap, + arch.SysBrk, + arch.SysClone, + arch.SysExitGroup, + arch.SysRead, + arch.SysWrite, + arch.SysFcntl, + arch.SysExit, + arch.SysSchedYield, + arch.SysGetTID, + arch.SysFutex, + arch.SysOpen, + arch.SysNanosleep, + arch.SysClockGetTime, + arch.SysGetpid, + arch.SysEventFd2, + arch.SysGetRandom, } func TestEVM_NoopSyscall64(t *testing.T) { t.Parallel() for _, vmVersion := range GetMipsVersionTestCases(t) { - noOpCalls := getNoopSyscalls64(vmVersion.Version) - testNoopSyscall(t, vmVersion, noOpCalls) + testNoopSyscall(t, vmVersion, NoopSyscalls64) } } func TestEVM_UnsupportedSyscall64(t *testing.T) { t.Parallel() - for _, vmVersion 
:= range GetMipsVersionTestCases(t) { - var noopSyscallNums = maps.Values(getNoopSyscalls64(vmVersion.Version)) - var SupportedSyscalls = getSupportedSyscalls(vmVersion.Version) - unsupportedSyscalls := make([]uint32, 0, 400) - for i := 5000; i < 5400; i++ { - candidate := uint32(i) - if slices.Contains(SupportedSyscalls, candidate) || slices.Contains(noopSyscallNums, candidate) { - continue - } - unsupportedSyscalls = append(unsupportedSyscalls, candidate) + noopSyscallNums := maps.Values(NoopSyscalls64) + unsupportedSyscalls := make([]uint32, 0, 400) + for i := 5000; i < 5400; i++ { + candidate := uint32(i) + if slices.Contains(SupportedSyscalls64, candidate) || slices.Contains(noopSyscallNums, candidate) { + continue } - - unsupported := unsupportedSyscalls - testUnsupportedSyscall(t, vmVersion, unsupported) + unsupportedSyscalls = append(unsupportedSyscalls, candidate) + } + for _, vmVersion := range GetMipsVersionTestCases(t) { + testUnsupportedSyscall(t, vmVersion, unsupportedSyscalls) } } diff --git a/cannon/mipsevm/tests/helpers.go b/cannon/mipsevm/tests/helpers.go index a5c5109ecef7f..afed044e6641b 100644 --- a/cannon/mipsevm/tests/helpers.go +++ b/cannon/mipsevm/tests/helpers.go @@ -88,12 +88,7 @@ func GetMipsVersionTestCases(t require.TestingT) []VersionedVMTestCase { var cases []VersionedVMTestCase for _, version := range versions.StateVersionTypes { if !arch.IsMips32 && versions.IsSupportedMultiThreaded64(version) { - goTarget := testutil.Go1_24 - features := versions.FeaturesForVersion(version) - if features.SupportWorkingSysGetRandom { - goTarget = testutil.Go1_25 - } - cases = append(cases, GetMultiThreadedTestCase(t, version, goTarget)) + cases = append(cases, GetMultiThreadedTestCase(t, version, testutil.Go1_25)) } } return cases diff --git a/cannon/mipsevm/testutil/evm.go b/cannon/mipsevm/testutil/evm.go index 9086d176d6d97..09bdaf262add6 100644 --- a/cannon/mipsevm/testutil/evm.go +++ b/cannon/mipsevm/testutil/evm.go @@ -139,6 +139,8 @@ type 
testChain struct { startTime uint64 } +var _ core.ChainContext = (*testChain)(nil) + func (d *testChain) Engine() consensus.Engine { return ethash.NewFullFaker() } @@ -147,6 +149,18 @@ func (d *testChain) Config() *params.ChainConfig { return d.config } +func (d *testChain) CurrentHeader() *types.Header { + panic("unimplemented") +} + +func (d *testChain) GetHeaderByHash(hash common.Hash) *types.Header { + panic("unimplemented") +} + +func (d *testChain) GetHeaderByNumber(number uint64) *types.Header { + return d.GetHeader(common.Hash{}, number) +} + func (d *testChain) GetHeader(h common.Hash, n uint64) *types.Header { parentHash := common.Hash{0: 0xff} binary.BigEndian.PutUint64(parentHash[1:], n-1) diff --git a/cannon/mipsevm/versions/state.go b/cannon/mipsevm/versions/state.go index e325e93c75d86..dcc5aa79677ac 100644 --- a/cannon/mipsevm/versions/state.go +++ b/cannon/mipsevm/versions/state.go @@ -69,9 +69,6 @@ func (s *VersionedState) CreateVM(logger log.Logger, po mipsevm.PreimageOracle, func FeaturesForVersion(version StateVersion) mipsevm.FeatureToggles { features := mipsevm.FeatureToggles{} // Set any required feature toggles based on the state version here. - if version >= VersionMultiThreaded64_v5 { - features.SupportWorkingSysGetRandom = true - } return features } diff --git a/docs/security-reviews/2026_01-U18-Cantina.pdf b/docs/security-reviews/2026_01-U18-Cantina.pdf new file mode 100644 index 0000000000000..49d648750f568 Binary files /dev/null and b/docs/security-reviews/2026_01-U18-Cantina.pdf differ diff --git a/docs/security-reviews/README.md b/docs/security-reviews/README.md index a6b07ac226b4f..0db85e1b1507a 100644 --- a/docs/security-reviews/README.md +++ b/docs/security-reviews/README.md @@ -49,6 +49,7 @@ Please see the report for the specific details. 
| 2025-10 | Spearbit | Revenue sharing and FeeVaults | [2025_10-Rev-Sharing-Spearbit.pdf](./2025_10-Rev-Sharing-Spearbit.pdf) / [2025_11-Rev-Sharing-Contracts-Upgrader.pdf](./2025_11-Rev-Sharing-Contracts-Upgrader.pdf) | f1fcd96406d895f37c2d1a422d50ea7dbd03a491 | op-contracts/v5.2.0+l2-fee-splitter-contracts | | 2025-11 | Spearbit | Safer Safes | [2025_11-SaferSafes-Spearbit.pdf](./2025_11-SaferSafes-Spearbit.pdf) | cb54822c5e18925498f48d8677b71992bf402631 | op-safe-contracts/v1.0.0 | | 2025-11 | Spearbit | Custom Gas Token | [2025_11-Custom-Gas-Token-Spearbit.pdf](./2025_11-Custom-Gas-Token-Spearbit.pdf) | 1f888ede1940fce20f71db89fc13039fdd96757e | op-contracts/v6.0.0 | +| 2026-01 | Cantina | Upgrade 18 | [2026_01-U18-Cantina.pdf](./2026_01-U18-Cantina.pdf) | 87d406db86907833f75d5c8fb26ade3dcb85eb41 | op-contracts/v6.0.0 | diff --git a/go.mod b/go.mod index 0375835b33336..db8d21c6be00d 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/fatih/color v1.18.0 github.com/fsnotify/fsnotify v1.9.0 github.com/go-task/slim-sprig/v3 v3.0.0 - github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb + github.com/golang/snappy v1.0.0 github.com/google/go-cmp v0.7.0 github.com/google/go-github/v55 v55.0.0 github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 @@ -86,7 +86,7 @@ require ( github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect - github.com/VictoriaMetrics/fastcache v1.12.2 // indirect + github.com/VictoriaMetrics/fastcache v1.13.0 // indirect github.com/adrg/xdg v0.4.0 // indirect github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b // indirect github.com/allegro/bigcache v1.2.1 // indirect @@ -314,7 +314,7 @@ require ( lukechampine.com/blake3 v1.3.0 // indirect ) -replace github.com/ethereum/go-ethereum => github.com/okx/op-geth v1.101408.1-0.20251224021339-340217527706 +replace 
github.com/ethereum/go-ethereum => github.com/okx/op-geth v1.101408.1-0.20260115072314-cbe8100db96e //replace github.com/ethereum/go-ethereum => ./op-geth diff --git a/go.sum b/go.sum index b9e2978b916e2..7bb69d4941119 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,8 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= -github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= -github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= +github.com/VictoriaMetrics/fastcache v1.13.0 h1:AW4mheMR5Vd9FkAPUv+NH6Nhw+fmbTMGMsNAoA/+4G0= +github.com/VictoriaMetrics/fastcache v1.13.0/go.mod h1:hHXhl4DA2fTL2HTZDJFXWgW0LNjo6B+4aj2Wmng3TjU= github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= @@ -53,7 +53,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= github.com/allegro/bigcache 
v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= @@ -118,7 +117,6 @@ github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chelnak/ysmrr v0.6.0 h1:kMhO0oI02tl/9szvxrOE0yeImtrK4KQhER0oXu1K/iM= @@ -348,8 +346,8 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= -github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -674,8 +672,8 @@ 
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= -github.com/okx/op-geth v1.101408.1-0.20251224021339-340217527706 h1:BL0PaM/kKAio6xXjTeFTFTf+YOBkqR0UmuK1v+I8arw= -github.com/okx/op-geth v1.101408.1-0.20251224021339-340217527706/go.mod h1:Qtp2CxjdW02xvbrkdxP805lEPvFs7/DSpRakf4qlUrY= +github.com/okx/op-geth v1.101408.1-0.20260115072314-cbe8100db96e h1:779DgYZt+fs2laYHylXEGX2YX3yD60f1iSDe3/8+fis= +github.com/okx/op-geth v1.101408.1-0.20260115072314-cbe8100db96e/go.mod h1:6WuihOhJbnAdM53Y6rJIPOIYuTS/RJLzc1vXxI3KDlw= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1163,7 +1161,6 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= diff --git a/kurtosis-devnet/justfile b/kurtosis-devnet/justfile index 13e6680f5cef0..29195104da56f 100644 --- a/kurtosis-devnet/justfile +++ b/kurtosis-devnet/justfile @@ -31,10 +31,10 @@ _docker_build_stack TAG TARGET *ARGS: (_docker_build TAG TARGET "../" "ops/docke 
cannon-image TAG='cannon:devnet': (_docker_build_stack TAG "cannon-target") da-server-image TAG='da-server:devnet': (_docker_build_stack TAG "da-server-target") op-batcher-image TAG='op-batcher:devnet': (_docker_build_stack TAG "op-batcher-target") -# TODO: this is a temporary hack to get the kona + asterisc version right. +# TODO: this is a temporary hack to get the kona version right. # Ideally the Dockerfile should be self-sufficient (right now we depend on # docker-bake.hcl to do the right thing). -op-challenger-image TAG='op-challenger:devnet': (_docker_build_stack TAG "op-challenger-target" "--build-arg" "KONA_VERSION=1.0.1" "--build-arg" "ASTERISC_VERSION=v1.3.0") +op-challenger-image TAG='op-challenger:devnet': (_docker_build_stack TAG "op-challenger-target" "--build-arg" "KONA_VERSION=1.0.1") op-conductor-image TAG='op-conductor:devnet': (_docker_build_stack TAG "op-conductor-target") op-deployer-image TAG='op-deployer:devnet': (_docker_build_stack TAG "op-deployer-target") op-dispute-mon-image TAG='op-dispute-mon:devnet': (_docker_build_stack TAG "op-dispute-mon-target") diff --git a/mise.toml b/mise.toml index 19f107a05bb49..6735d595e28fd 100644 --- a/mise.toml +++ b/mise.toml @@ -45,7 +45,6 @@ op-acceptor = "op-acceptor/v3.8.1" # Put things here if you need to track versions of tools or projects that can't # actually be managed by mise (yet). Make sure that anything you put in here is # also found inside of disabled_tools or mise will try to install it. -asterisc = "1.3.0" kontrol = "1.0.90" binary_signer = "1.0.4" @@ -64,11 +63,10 @@ svm-rs = "ubi:alloy-rs/svm-rs[exe=svm]" # These are disabled, but latest mise versions error if they don't have a known # install source even though it won't ever actually use that source. 
-asterisc = "ubi:ethereum-optimism/fake-asterisc" kontrol = "ubi:ethereum-optimism/fake-kontrol" binary_signer = "ubi:ethereum-optimism/fake-binary_signer" [settings] experimental = true pipx.uvx = true -disable_tools = ["asterisc", "kontrol", "binary_signer"] +disable_tools = ["kontrol", "binary_signer"] diff --git a/op-acceptance-tests/tests/base/eth_simulate_test.go b/op-acceptance-tests/tests/base/eth_simulate_test.go new file mode 100644 index 0000000000000..0d9fd4e17ec46 --- /dev/null +++ b/op-acceptance-tests/tests/base/eth_simulate_test.go @@ -0,0 +1,81 @@ +package base + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/require" +) + +func TestEthSimulateV1(gt *testing.T) { + t := devtest.ParallelT(gt) + sys := presets.NewMinimal(t) + ctx := t.Ctx() + + type SimulateParams struct { + ReturnFullTransactions bool + BlockStateCalls []any `json:"blockStateCalls"` + } + + params := SimulateParams{ + ReturnFullTransactions: true, + BlockStateCalls: []any{ + map[string]any{ + "calls": []any{ + map[string]any{ + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "data": "0x", + }, + }, + }, + }, + } + + // wait until the chain mines at least one block + // (known limitation that we cannot simulate on top of the genesis block, + // Since the EL will just reuse the l1 attributes tx from the previous block + // and there is no such transaction for the genesis block). 
+ sys.L1Network.WaitForBlock() + + // Require the RPC call to succeed + rpcClient := sys.L2EL.Escape().EthClient().RPC() + var resp []map[string]any + err := rpcClient.CallContext( + ctx, + &resp, + "eth_simulateV1", + params, + "0x1", // Block 1 + ) + require.NoError(t, err) + + // Require exactly one block, matching input + require.Len(t, resp, 1) + respBlock := resp[0] + + // Require exactly one transaction, matching input + require.Len(t, respBlock["transactions"], 1) + transaction := (respBlock["transactions"].([]any)[0]).(map[string]any) + + // Transaction type should be dynamic fee transaction type, not a deposit transaction. + require.Equal(t, "0x2", transaction["type"]) // 0x02 is the dynamic fee transaction type + + // Check Blob Gas Used is nonzero + // This proves out that eth_simulateV1 can be used to estimate the DA size of a transaction + bgu, err := hexutil.DecodeUint64(respBlock["blobGasUsed"].(string)) + require.NoError(t, err) + require.NotZero(t, bgu) + + err = rpcClient.CallContext( + ctx, + &resp, + "eth_simulateV1", + params, + "0x0", // Genesis block + ) + t.Log("resp", resp) + require.Error(t, err, "eth_simulateV1 cannot be used on the genesis block") +} diff --git a/op-batcher/batcher/channel_config.go b/op-batcher/batcher/channel_config.go index 20ff60ea1a5a1..2f23796c8f4d9 100644 --- a/op-batcher/batcher/channel_config.go +++ b/op-batcher/batcher/channel_config.go @@ -53,7 +53,7 @@ type ChannelConfig struct { // ChannelConfig returns a copy of the receiver. // This allows the receiver to be a static ChannelConfigProvider of itself. 
-func (cc ChannelConfig) ChannelConfig(isPectra, isThrottling bool) ChannelConfig { +func (cc ChannelConfig) ChannelConfig(isThrottling bool) ChannelConfig { return cc } diff --git a/op-batcher/batcher/channel_config_provider.go b/op-batcher/batcher/channel_config_provider.go index 35a57ade85ae8..81d018bfaa8d7 100644 --- a/op-batcher/batcher/channel_config_provider.go +++ b/op-batcher/batcher/channel_config_provider.go @@ -12,11 +12,11 @@ import ( type ( ChannelConfigProvider interface { - ChannelConfig(isPectra, isThrottling bool) ChannelConfig + ChannelConfig(isThrottling bool) ChannelConfig } GasPricer interface { - SuggestGasPriceCaps(ctx context.Context) (tipCap *big.Int, baseFee *big.Int, blobBaseFee *big.Int, err error) + SuggestGasPriceCaps(ctx context.Context) (tipCap *big.Int, baseFee *big.Int, blobTipCap *big.Int, blobBaseFee *big.Int, err error) } DynamicEthChannelConfig struct { @@ -53,7 +53,7 @@ func NewDynamicEthChannelConfig(lgr log.Logger, // // The blob config is returned when throttling is in progress, prioritizing throughput over cost // in times of limited bandwidth. 
-func (dec *DynamicEthChannelConfig) ChannelConfig(isPectra, isThrottling bool) ChannelConfig { +func (dec *DynamicEthChannelConfig) ChannelConfig(isThrottling bool) ChannelConfig { if isThrottling { dec.log.Info("Using blob channel config while throttling is in progress") dec.lastConfig = &dec.blobConfig @@ -61,7 +61,7 @@ func (dec *DynamicEthChannelConfig) ChannelConfig(isPectra, isThrottling bool) C } ctx, cancel := context.WithTimeout(context.Background(), dec.timeout) defer cancel() - tipCap, baseFee, blobBaseFee, err := dec.gasPricer.SuggestGasPriceCaps(ctx) + tipCap, baseFee, blobTipCap, blobBaseFee, err := dec.gasPricer.SuggestGasPriceCaps(ctx) if err != nil { dec.log.Warn("Error querying gas prices, returning last config", "err", err) return *dec.lastConfig @@ -81,8 +81,14 @@ func (dec *DynamicEthChannelConfig) ChannelConfig(isPectra, isThrottling bool) C numBlobsPerTx := dec.blobConfig.TargetNumFrames // Compute the total absolute cost of submitting either a single calldata tx or a single blob tx. 
- calldataCost, blobCost := computeSingleCalldataTxCost(tokensPerCalldataTx, baseFee, tipCap, isPectra), - computeSingleBlobTxCost(numBlobsPerTx, baseFee, tipCap, blobBaseFee) + calldataCost, blobCost, oracleBlobCost := + computeSingleCalldataTxCost(tokensPerCalldataTx, baseFee, tipCap), + computeSingleBlobTxCost(numBlobsPerTx, baseFee, tipCap, blobBaseFee), + computeSingleBlobTxCost(numBlobsPerTx, baseFee, blobTipCap, blobBaseFee) + + // TODO(18618): before activating the blob tip oracle, confirm in prod that we mostly get newBlobSavings == true, otherwise + // it is not worth it using the oracle + oracleBlobSavings := oracleBlobCost.Cmp(blobCost) < 0 // Now we compare the absolute cost per tx divided by the number of bytes per tx: blobDataBytesPerTx := big.NewInt(eth.MaxBlobDataSize * int64(numBlobsPerTx)) @@ -97,6 +103,8 @@ func (dec *DynamicEthChannelConfig) ChannelConfig(isPectra, isThrottling bool) C lgr := dec.log.New("base_fee", baseFee, "blob_base_fee", blobBaseFee, "tip_cap", tipCap, "calldata_bytes", calldataBytesPerTx, "calldata_cost", calldataCost, "blob_data_bytes", blobDataBytesPerTx, "blob_cost", blobCost, + "oracle_blob_cost", oracleBlobCost, + "oracle_blob_savings", oracleBlobSavings, "cost_ratio", costRatio) if ay.Cmp(bx) == 1 { @@ -109,22 +117,14 @@ func (dec *DynamicEthChannelConfig) ChannelConfig(isPectra, isThrottling bool) C return dec.blobConfig } -func computeSingleCalldataTxCost(numTokens uint64, baseFee, tipCap *big.Int, isPectra bool) *big.Int { +func computeSingleCalldataTxCost(numTokens uint64, baseFee, tipCap *big.Int) *big.Int { // We assume isContractCreation = false and execution_gas_used = 0 in https://eips.ethereum.org/EIPS/eip-7623 // This is a safe assumption given how batcher transactions are constructed. 
- const ( - standardTokenCost = 4 - totalCostFloorPerToken = 10 - ) - var multiplier uint64 - if isPectra { - multiplier = totalCostFloorPerToken - } else { - multiplier = standardTokenCost - } + // Since Pectra is active on L1, we use the totalCostFloorPerToken (10) as the multiplier. + const totalCostFloorPerToken = 10 calldataPrice := new(big.Int).Add(baseFee, tipCap) - calldataGas := big.NewInt(int64(params.TxGas + numTokens*multiplier)) + calldataGas := big.NewInt(int64(params.TxGas + numTokens*totalCostFloorPerToken)) return new(big.Int).Mul(calldataGas, calldataPrice) } diff --git a/op-batcher/batcher/channel_config_provider_test.go b/op-batcher/batcher/channel_config_provider_test.go index 6f4ea7701c0b1..40cf3a9c74213 100644 --- a/op-batcher/batcher/channel_config_provider_test.go +++ b/op-batcher/batcher/channel_config_provider_test.go @@ -17,14 +17,15 @@ type mockGasPricer struct { err error tipCap int64 baseFee int64 + blobTipCap int64 blobBaseFee int64 } -func (gp *mockGasPricer) SuggestGasPriceCaps(context.Context) (tipCap *big.Int, baseFee *big.Int, blobBaseFee *big.Int, err error) { +func (gp *mockGasPricer) SuggestGasPriceCaps(context.Context) (tipCap *big.Int, baseFee *big.Int, blobTipCap *big.Int, blobBaseFee *big.Int, err error) { if gp.err != nil { - return nil, nil, nil, gp.err + return nil, nil, nil, nil, gp.err } - return big.NewInt(gp.tipCap), big.NewInt(gp.baseFee), big.NewInt(gp.blobBaseFee), nil + return big.NewInt(gp.tipCap), big.NewInt(gp.baseFee), big.NewInt(gp.blobTipCap), big.NewInt(gp.blobBaseFee), nil } func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { @@ -38,13 +39,13 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { UseBlobs: true, } + // Since Pectra is now always active on L1, we only test with Pectra pricing (totalCostFloorPerToken = 10) tests := []struct { name string tipCap int64 baseFee int64 blobBaseFee int64 wantCalldata bool - isL1Pectra bool isThrottling bool }{ { @@ -57,51 +58,21 @@ func 
TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { name: "close-cheaper-blobs", tipCap: 1e3, baseFee: 1e6, - blobBaseFee: 16e6, // because of amortized fixed 21000 tx cost, blobs are still cheaper here... - }, - { - name: "close-cheaper-calldata", - tipCap: 1e3, - baseFee: 1e6, - blobBaseFee: 161e5, // ...but then increasing the fee just a tiny bit makes blobs more expensive - wantCalldata: true, - }, - { - name: "much-cheaper-calldata", - tipCap: 1e3, - baseFee: 1e6, - blobBaseFee: 1e9, - wantCalldata: true, - }, - { - name: "much-cheaper-blobs-l1-pectra", - tipCap: 1e3, - baseFee: 1e6, - blobBaseFee: 1, - isL1Pectra: true, - }, - { - name: "close-cheaper-blobs-l1-pectra", - tipCap: 1e3, - baseFee: 1e6, blobBaseFee: 398e5, // this value just under the equilibrium point for 3 blobs - isL1Pectra: true, }, { - name: "close-cheaper-calldata-l1-pectra", + name: "close-cheaper-calldata", tipCap: 1e3, baseFee: 1e6, blobBaseFee: 399e5, // this value just over the equilibrium point for 3 blobs wantCalldata: true, - isL1Pectra: true, }, { - name: "much-cheaper-calldata-l1-pectra", + name: "much-cheaper-calldata", tipCap: 1e3, baseFee: 1e6, blobBaseFee: 1e9, wantCalldata: true, - isL1Pectra: true, }, { // blobs should be chosen even though calldata is cheaper. 
@@ -121,7 +92,7 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { blobBaseFee: tt.blobBaseFee, } dec := NewDynamicEthChannelConfig(lgr, 1*time.Second, gp, blobCfg, calldataCfg) - cc := dec.ChannelConfig(tt.isL1Pectra, tt.isThrottling) + cc := dec.ChannelConfig(tt.isThrottling) if tt.wantCalldata { require.Equal(t, cc, calldataCfg) require.NotNil(t, ch.FindLog(testlog.NewMessageContainsFilter("calldata"))) @@ -143,21 +114,21 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { err: errors.New("gp-error"), } dec := NewDynamicEthChannelConfig(lgr, 1*time.Second, gp, blobCfg, calldataCfg) - require.Equal(t, dec.ChannelConfig(false, false), blobCfg) + require.Equal(t, dec.ChannelConfig(false), blobCfg) require.NotNil(t, ch.FindLog( testlog.NewLevelFilter(slog.LevelWarn), testlog.NewMessageContainsFilter("returning last config"), )) gp.err = nil - require.Equal(t, dec.ChannelConfig(false, false), calldataCfg) + require.Equal(t, dec.ChannelConfig(false), calldataCfg) require.NotNil(t, ch.FindLog( testlog.NewLevelFilter(slog.LevelInfo), testlog.NewMessageContainsFilter("calldata"), )) gp.err = errors.New("gp-error-2") - require.Equal(t, dec.ChannelConfig(false, false), calldataCfg) + require.Equal(t, dec.ChannelConfig(false), calldataCfg) require.NotNil(t, ch.FindLog( testlog.NewLevelFilter(slog.LevelWarn), testlog.NewMessageContainsFilter("returning last config"), @@ -166,11 +137,8 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { } func TestComputeSingleCalldataTxCost(t *testing.T) { - // 30KB of data - got := computeSingleCalldataTxCost(120_000, big.NewInt(1), big.NewInt(1), false) - require.Equal(t, big.NewInt(1_002_000), got) // (21_000 + 4*120_000) * (1+1) - - got = computeSingleCalldataTxCost(120_000, big.NewInt(1), big.NewInt(1), true) + // 30KB of data - since Pectra is active, we use totalCostFloorPerToken = 10 + got := computeSingleCalldataTxCost(120_000, big.NewInt(1), big.NewInt(1)) require.Equal(t, 
big.NewInt(2_442_000), got) // (21_000 + 10*120_000) * (1+1) } diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index f65844a674502..a331c188404b9 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -62,7 +62,7 @@ func NewChannelManager(log log.Logger, metr metrics.Metricer, cfgProvider Channe log: log, metr: metr, cfgProvider: cfgProvider, - defaultCfg: cfgProvider.ChannelConfig(false, false), + defaultCfg: cfgProvider.ChannelConfig(false), rollupCfg: rollupCfg, outFactory: NewChannelOut, txChannels: make(map[string]*channel), @@ -224,7 +224,7 @@ func (s *channelManager) nextTxData(channel *channel) (txData, error) { // It will decide whether to switch DA type automatically. // When switching DA type, the channelManager state will be rebuilt // with a new ChannelConfig. -func (s *channelManager) TxData(l1Head eth.BlockID, isPectra bool, isThrottling bool, pi pubInfo) (txData, error) { +func (s *channelManager) TxData(l1Head eth.BlockID, isThrottling bool, pi pubInfo) (txData, error) { channel, err := s.getReadyChannel(l1Head, pi) if err != nil { return emptyTxData, err @@ -236,7 +236,7 @@ func (s *channelManager) TxData(l1Head eth.BlockID, isPectra bool, isThrottling } // Call provider method to reassess optimal DA type - newCfg := s.cfgProvider.ChannelConfig(isPectra, isThrottling) + newCfg := s.cfgProvider.ChannelConfig(isThrottling) // No change: if newCfg.UseBlobs == s.defaultCfg.UseBlobs { @@ -285,7 +285,7 @@ type pubInfo struct { // If forcePublish is true, it will force close channels and // generate frames for them. 
func (s *channelManager) getReadyChannel(l1Head eth.BlockID, pi pubInfo) (*channel, error) { - if pi.forcePublish && s.currentChannel.TotalFrames() == 0 { + if pi.forcePublish && s.currentChannel != nil && s.currentChannel.TotalFrames() == 0 { s.log.Info("Force-closing channel and creating frames", "channel_id", s.currentChannel.ID()) s.currentChannel.Close() if err := s.currentChannel.OutputFrames(); err != nil { diff --git a/op-batcher/batcher/channel_manager_memory_test.go b/op-batcher/batcher/channel_manager_memory_test.go index fe305aa2fe671..b8f657fc10f68 100644 --- a/op-batcher/batcher/channel_manager_memory_test.go +++ b/op-batcher/batcher/channel_manager_memory_test.go @@ -124,7 +124,7 @@ func runMemoryTest(t *testing.T, batchType uint, compressorType string, compress require.NoError(t, m.processBlocks()) // Try to get transaction data to fill channels - _, err := m.TxData(eth.BlockID{}, false, false, pubInfo{}) + _, err := m.TxData(eth.BlockID{}, false, pubInfo{}) // It's okay if there's no data ready (io.EOF) if err != nil && err.Error() != "EOF" { require.NoError(t, err) diff --git a/op-batcher/batcher/channel_manager_test.go b/op-batcher/batcher/channel_manager_test.go index 0c4feb9e0eb4c..8f66dfd5f59cd 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -103,9 +103,9 @@ func ChannelManagerReturnsErrReorgWhenDrained(t *testing.T, batchType uint) { require.NoError(t, m.AddL2Block(a)) - _, err := m.TxData(eth.BlockID{}, false, false, pubInfo{}) + _, err := m.TxData(eth.BlockID{}, false, pubInfo{}) require.NoError(t, err) - _, err = m.TxData(eth.BlockID{}, false, false, pubInfo{}) + _, err = m.TxData(eth.BlockID{}, false, pubInfo{}) require.ErrorIs(t, err, io.EOF) require.ErrorIs(t, m.AddL2Block(x), ErrReorg) @@ -207,7 +207,7 @@ func ChannelManager_TxResend(t *testing.T, batchType uint) { require.NoError(m.AddL2Block(a)) - txdata0, err := m.TxData(eth.BlockID{}, false, false, pubInfo{}) + txdata0, 
err := m.TxData(eth.BlockID{}, false, pubInfo{}) require.NoError(err) txdata0bytes := txdata0.CallData() data0 := make([]byte, len(txdata0bytes)) @@ -215,13 +215,13 @@ func ChannelManager_TxResend(t *testing.T, batchType uint) { copy(data0, txdata0bytes) // ensure channel is drained - _, err = m.TxData(eth.BlockID{}, false, false, pubInfo{}) + _, err = m.TxData(eth.BlockID{}, false, pubInfo{}) require.ErrorIs(err, io.EOF) // requeue frame m.TxFailed(txdata0.ID()) - txdata1, err := m.TxData(eth.BlockID{}, false, false, pubInfo{}) + txdata1, err := m.TxData(eth.BlockID{}, false, pubInfo{}) require.NoError(err) data1 := txdata1.CallData() @@ -284,7 +284,7 @@ type FakeDynamicEthChannelConfig struct { assessments int } -func (f *FakeDynamicEthChannelConfig) ChannelConfig(isPectra, isThrottling bool) ChannelConfig { +func (f *FakeDynamicEthChannelConfig) ChannelConfig(isThrottling bool) ChannelConfig { f.assessments++ if f.chooseBlobs { return f.blobConfig @@ -338,7 +338,7 @@ func TestChannelManager_IgnoreMaxChannelDuration(t *testing.T) { // Call TxData a first time - if `ignoreMaxChannelDuration` is `false`, channel would be timed out, // but since `ignoreMaxChannelDuration` is `true`, we expect it to be not timed out. - _, err := m.TxData(eth.BlockID{Number: 21}, false, false, pubInfo{ignoreMaxChannelDuration: true}) + _, err := m.TxData(eth.BlockID{Number: 21}, false, pubInfo{ignoreMaxChannelDuration: true}) require.ErrorIs(t, err, io.EOF) // Add more blocks to the channel manager @@ -351,7 +351,7 @@ func TestChannelManager_IgnoreMaxChannelDuration(t *testing.T) { require.False(t, m.channelQueue[0].IsFull()) // Call TxData again, with ignoreMaxChannelDuration unset. 
- _, err = m.TxData(eth.BlockID{Number: 22}, false, false, pubInfo{}) + _, err = m.TxData(eth.BlockID{Number: 22}, false, pubInfo{}) require.NoError(t, err) require.NotEmpty(t, m.channelQueue) @@ -406,7 +406,7 @@ func TestChannelManager_TxData(t *testing.T) { m.blocks = queue.Queue[SizedBlock]{SizedBlock{Block: blockA}} // Call TxData a first time to trigger blocks->channels pipeline - _, err := m.TxData(eth.BlockID{}, false, false, pubInfo{}) + _, err := m.TxData(eth.BlockID{}, false, pubInfo{}) require.ErrorIs(t, err, io.EOF) // The test requires us to have something in the channel queue @@ -425,7 +425,7 @@ func TestChannelManager_TxData(t *testing.T) { var data txData for { m.blocks.Enqueue(SizedBlock{Block: blockA}) - data, err = m.TxData(eth.BlockID{}, false, false, pubInfo{}) + data, err = m.TxData(eth.BlockID{}, false, pubInfo{}) if err == nil && data.Len() > 0 { break } @@ -753,7 +753,7 @@ func TestChannelManager_TxData_ForcePublish(t *testing.T) { m.blocks = queue.Queue[SizedBlock]{SizedBlock{Block: blockA}} // Call TxData a first time to trigger blocks->channels pipeline - txData, err := m.TxData(eth.BlockID{}, false, false, pubInfo{}) + txData, err := m.TxData(eth.BlockID{}, false, pubInfo{}) require.ErrorIs(t, err, io.EOF) require.Zero(t, txData.Len(), 0) @@ -763,7 +763,7 @@ func TestChannelManager_TxData_ForcePublish(t *testing.T) { require.False(t, m.channelQueue[0].IsFull()) // Call TxData with force publish enabled - txData, err = m.TxData(eth.BlockID{}, false, false, pubInfo{forcePublish: true}) + txData, err = m.TxData(eth.BlockID{}, false, pubInfo{forcePublish: true}) // Despite no additional blocks being added, we should have tx data: require.NoError(t, err) @@ -870,7 +870,7 @@ func TestChannelManagerUnsafeBytes(t *testing.T) { _, err = manager.TxData(eth.BlockID{ Hash: common.Hash{}, Number: 0, - }, true, false, pubInfo{}) + }, false, pubInfo{}) } assert.Equal(t, tc.afterAddingToChannel, manager.UnsafeDABytes()) @@ -1042,3 +1042,63 @@ func 
TestChannelManagerUnsafeBytes(t *testing.T) { }) }) } + +func TestChannelManager_SingleBlockBiggerThanMaxFrameSize(t *testing.T) { + rng := rand.New(rand.NewSource(int64(1234))) // use fixed seed for reproducibility / determinism + a := derivetest.RandomL2BlockWithChainId(rng, 4, defaultTestRollupConfig.L2ChainID) + l1BlockID := eth.BlockID{ + Hash: a.Hash(), + Number: a.NumberU64(), + } + + for _, ca := range derive.CompressionAlgos { + t.Run(string(ca), func(t *testing.T) { + require := require.New(t) + assert := assert.New(t) + log := testlog.Logger(t, log.LevelCrit) + + // Use an extremely low frame size that will definitely not be enough for the random block + cfg := channelManagerTestConfig(derive.FrameV0OverHeadSize, derive.SingularBatchType) + cfg.InitShadowCompressor(ca) + m := NewChannelManager(log, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + require.NoError(m.AddL2Block(a)) + + // Make sure there is a channel + require.NoError(m.ensureChannelWithSpace(l1BlockID)) + channel := m.currentChannel + require.NotNil(channel) + require.Equal(1, m.pendingBlocks()) + require.Zero(len(channel.blocks)) + + // Process the blocks + require.NoError(m.processBlocks()) + + // The block should have been moved into the channel + // This test is a regression test for a bug where the channel manager would not + // correctly handle a single block that was bigger than the maximum frame size, + // because it incorrectly interpreted the + // static header bytes written to the buffer at construction time as block data. + assert.Equal(0, m.pendingBlocks()) + assert.Equal(1, len(channel.blocks), "channel should have one block") + }) + } +} + +// TestChannelManager_getReadyChannel_NilChannel verifies that getReadyChannel +// handles nil currentChannel gracefully when forcePublish is true. +// This is a regression test for a nil pointer dereference bug. 
+func TestChannelManager_getReadyChannel_NilChannel(t *testing.T) { + log := testlog.Logger(t, log.LevelCrit) + cfg := channelManagerTestConfig(120_000, derive.SingularBatchType) + m := NewChannelManager(log, metrics.NoopMetrics, cfg, &rollup.Config{}) + m.Clear(eth.BlockID{}) + + require.Nil(t, m.currentChannel, "currentChannel should be nil after Clear()") + + l1Head := eth.BlockID{Hash: common.HexToHash("0x1234"), Number: 100} + + // Should not panic when currentChannel is nil and forcePublish is true + require.NotPanics(t, func() { + _, _ = m.getReadyChannel(l1Head, pubInfo{forcePublish: true}) + }, "getReadyChannel should not panic when currentChannel is nil") +} diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index cd1bb2628f53d..308ad3832a86e 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -771,7 +771,7 @@ func (l *BatchSubmitter) waitNodeSync() error { cCtx, cancel := context.WithTimeout(ctx, l.Config.NetworkTimeout) defer cancel() - l1Tip, _, err := l.l1Tip(cCtx) + l1Tip, err := l.l1Tip(cCtx) if err != nil { return fmt.Errorf("failed to retrieve l1 tip: %w", err) } @@ -866,7 +866,7 @@ func (l *BatchSubmitter) clearState(ctx context.Context) { // publishTxToL1 submits a single state tx to the L1 func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group, pi pubInfo) error { // send all available transactions - l1tip, isPectra, err := l.l1Tip(ctx) + l1tip, err := l.l1Tip(ctx) if err != nil { l.Log.Error("Failed to query L1 tip", "err", err) return err @@ -877,7 +877,7 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t // Collect next transaction data. This pulls data out of the channel, so we need to make sure // to put it back if ever da or txmgr requests fail, by calling l.recordFailedDARequest/recordFailedTx. 
l.channelMgrMutex.Lock() - txdata, err := l.channelMgr.TxData(l1tip.ID(), isPectra, params.IsThrottling(), pi) + txdata, err := l.channelMgr.TxData(l1tip.ID(), params.IsThrottling(), pi) l.channelMgrMutex.Unlock() if err == io.EOF { @@ -1089,16 +1089,14 @@ func (l *BatchSubmitter) recordConfirmedTx(id txID, receipt *types.Receipt) { // l1Tip gets the current L1 tip as a L1BlockRef. The passed context is assumed // to be a lifetime context, so it is internally wrapped with a network timeout. -// It also returns a boolean indicating if the tip is from a Pectra chain. -func (l *BatchSubmitter) l1Tip(ctx context.Context) (eth.L1BlockRef, bool, error) { +func (l *BatchSubmitter) l1Tip(ctx context.Context) (eth.L1BlockRef, error) { tctx, cancel := context.WithTimeout(ctx, l.Config.NetworkTimeout) defer cancel() head, err := l.L1Client.HeaderByNumber(tctx, nil) if err != nil { - return eth.L1BlockRef{}, false, fmt.Errorf("getting latest L1 block: %w", err) + return eth.L1BlockRef{}, fmt.Errorf("getting latest L1 block: %w", err) } - isPectra := head.RequestsHash != nil // See https://eips.ethereum.org/EIPS/eip-7685 - return eth.InfoToL1BlockRef(eth.HeaderBlockInfo(head)), isPectra, nil + return eth.InfoToL1BlockRef(eth.HeaderBlockInfo(head)), nil } func (l *BatchSubmitter) checkTxpool(queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) bool { diff --git a/op-batcher/batcher/service.go b/op-batcher/batcher/service.go index e253420b1acee..b4edbc30f4ede 100644 --- a/op-batcher/batcher/service.go +++ b/op-batcher/batcher/service.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "math/big" "sync/atomic" "time" @@ -19,7 +20,9 @@ import ( "github.com/ethereum-optimism/optimism/op-node/chaincfg" "github.com/ethereum-optimism/optimism/op-node/params" "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/bgpo" "github.com/ethereum-optimism/optimism/op-service/cliapp" + 
"github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/dial" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/httputil" @@ -80,6 +83,10 @@ type BatcherService struct { stopped atomic.Bool NotSubmittingOnStart bool + + // BlobGasPriceOracle tracks blob base gas prices for dynamic pricing + blobTipOracle *bgpo.BlobTipOracle + oracleStopCh chan struct{} } type DriverSetupOption func(setup *DriverSetup) @@ -169,7 +176,7 @@ func (bs *BatcherService) initFromCLIConfig(ctx context.Context, closeApp contex if err := bs.initRollupConfig(ctx); err != nil { return fmt.Errorf("failed to load rollup config: %w", err) } - if err := bs.initTxManager(cfg); err != nil { + if err := bs.initTxManager(ctx, cfg); err != nil { return fmt.Errorf("failed to init Tx manager: %w", err) } // must be init before driver and channel config @@ -254,6 +261,56 @@ func (bs *BatcherService) initRollupConfig(ctx context.Context) error { return nil } +func (bs *BatcherService) initBlobTipOracle(ctx context.Context, cfg *CLIConfig) error { + // Only initialize the oracle if we're using blobs or auto mode + if cfg.DataAvailabilityType != flags.BlobsType && cfg.DataAvailabilityType != flags.AutoType { + bs.Log.Debug("Skipping blob tip oracle initialization (not using blobs)") + return nil + } + + // Get RPC client from L1 client + // The ethclient.Client has a Client() method that returns the underlying *rpc.Client + rpcClient := bs.L1Client.Client() + if rpcClient == nil { + return fmt.Errorf("failed to get RPC client from L1 client") + } + + // Get L1 chain config from rollup config + l1ChainID := eth.ChainIDFromBig(bs.RollupConfig.L1ChainID) + l1ChainConfig := eth.L1ChainConfigByChainID(l1ChainID) + if l1ChainConfig == nil { + bs.Log.Info("Blob tip oracle not initialized when L1 chain ID is not known (Ethereum mainnet, Sepolia, Holesky, Hoodi)") + return nil + } + + // Wrap the RPC client to 
match the client.RPC interface + baseRPCClient := client.NewBaseRPCClient(rpcClient) + + // Create the oracle with default config + oracleConfig := bgpo.DefaultBlobTipOracleConfig() + oracleConfig.NetworkTimeout = bs.NetworkTimeout + minTipCap, err := eth.GweiToWei(cfg.TxMgrConfig.MinTipCapGwei) + if err != nil { + return fmt.Errorf("invalid min tip cap: %w", err) + } + oracleConfig.DefaultPriorityFee = minTipCap + bs.blobTipOracle = bgpo.NewBlobTipOracle(ctx, baseRPCClient, l1ChainConfig, bs.Log, oracleConfig) + bs.oracleStopCh = make(chan struct{}) + + bs.Log.Info("Initialized blob tip oracle") + + // Start the blob tip oracle if it's initialized + go func() { + if err := bs.blobTipOracle.Start(); err != nil { + bs.Log.Error("Blob tip oracle stopped with error", "err", err) + } + close(bs.oracleStopCh) + }() + bs.blobTipOracle.WaitCachePopulated() + bs.Log.Info("Started blob tip oracle") + return nil +} + func (bs *BatcherService) initChannelConfig(cfg *CLIConfig) error { channelTimeout := bs.RollupConfig.ChannelTimeoutBedrock // Use lower channel timeout if granite is scheduled. 
@@ -340,8 +397,52 @@ func (bs *BatcherService) initChannelConfig(cfg *CLIConfig) error { return nil } -func (bs *BatcherService) initTxManager(cfg *CLIConfig) error { - txManager, err := txmgr.NewSimpleTxManager("batcher", bs.Log, bs.Metrics, cfg.TxMgrConfig) +func (bs *BatcherService) initTxManager(ctx context.Context, cfg *CLIConfig) error { + // Initialize the blob tip oracle first + if err := bs.initBlobTipOracle(ctx, cfg); err != nil { + return fmt.Errorf("failed to init blob tip oracle: %w", err) + } + + // Create the base config from CLI config + txmgrConfig, err := txmgr.NewConfig(cfg.TxMgrConfig, bs.Log) + if err != nil { + return err + } + + // Create a custom gas price estimator that uses the blob tip oracle if available + if bs.blobTipOracle != nil { + txmgrConfig.GasPriceEstimatorFn = func(ctx context.Context, backend txmgr.ETHBackend) (*big.Int, *big.Int, *big.Int, *big.Int, error) { + // Get tip and base fee from backend (standard way for execution gas) + tip, err := backend.SuggestGasTipCap(ctx) + if err != nil { + return nil, nil, nil, nil, err + } + + head, err := backend.HeaderByNumber(ctx, nil) + if err != nil { + return nil, nil, nil, nil, err + } + if head.BaseFee == nil { + return nil, nil, nil, nil, errors.New("txmgr does not support pre-london blocks that do not have a base fee") + } + + blobBaseFee, err := backend.BlobBaseFee(ctx) + if err != nil { + return nil, nil, nil, nil, err + } + + // Use the oracle's SuggestBlobTipCap for blob tip fee suggestion + // This analyzes recent blob transactions to suggest an appropriate blob tip fee + suggestedBlobFeeCap, err := bs.blobTipOracle.SuggestBlobTipCap(ctx, 0, 0) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("blob tip oracle failed to suggest blob tip fee: %w", err) + } + + return tip, head.BaseFee, suggestedBlobFeeCap, blobBaseFee, nil + } + } + + txManager, err := txmgr.NewSimpleTxManagerFromConfig("batcher", bs.Log, bs.Metrics, txmgrConfig) if err != nil { return err } @@ -439,7 
+540,7 @@ func (bs *BatcherService) initAltDA(cfg *CLIConfig) error { // Start runs once upon start of the batcher lifecycle, // and starts batch-submission work if the batcher is configured to start submit data on startup. -func (bs *BatcherService) Start(_ context.Context) error { +func (bs *BatcherService) Start(ctx context.Context) error { bs.driver.Log.Info("Starting batcher", "notSubmittingOnStart", bs.NotSubmittingOnStart) if !bs.NotSubmittingOnStart { @@ -475,6 +576,20 @@ func (bs *BatcherService) Stop(ctx context.Context) error { bs.TxManager.Close() } + // Stop the blob tip oracle if it's running + if bs.blobTipOracle != nil { + bs.blobTipOracle.Close() + // Wait for the oracle goroutine to finish + if bs.oracleStopCh != nil { + select { + case <-bs.oracleStopCh: + // Oracle stopped + case <-ctx.Done(): + // Context cancelled, force stop + } + } + } + var result error if bs.driver != nil { if err := bs.driver.StopBatchSubmittingIfRunning(ctx); err != nil { diff --git a/op-batcher/batcher/test_batch_submitter.go b/op-batcher/batcher/test_batch_submitter.go index 2b0b9649a3658..acd6a8c36c912 100644 --- a/op-batcher/batcher/test_batch_submitter.go +++ b/op-batcher/batcher/test_batch_submitter.go @@ -27,7 +27,7 @@ func (l *TestBatchSubmitter) JamTxPool(ctx context.Context) error { } var candidate *txmgr.TxCandidate var err error - cc := l.channelMgr.cfgProvider.ChannelConfig(true, false) + cc := l.channelMgr.cfgProvider.ChannelConfig(false) if cc.UseBlobs { candidate = l.calldataTxCandidate([]byte{}) } else if candidate, err = l.blobTxCandidate(emptyTxData); err != nil { diff --git a/op-batcher/compressor/shadow_compressor.go b/op-batcher/compressor/shadow_compressor.go index d29f7583c4cf2..682c0ef40b88c 100644 --- a/op-batcher/compressor/shadow_compressor.go +++ b/op-batcher/compressor/shadow_compressor.go @@ -71,9 +71,10 @@ func (t *ShadowCompressor) Write(p []byte) (int, error) { newBound = uint64(t.shadowCompressor.Len()) + CloseOverheadZlib if newBound > 
t.config.TargetOutputSize { t.fullErr = derive.ErrCompressorFull - if t.Len() > 0 { + if t.Len() > t.compressor.StaticBytesLen() { // only return an error if we've already written data to this compressor before - // (otherwise single blocks over the target would never be written) + // (otherwise single blocks over the target would never be written). Ignore static + // header bytes written to the buffer at construction time return 0, t.fullErr } } diff --git a/op-chain-ops/addresses/contracts.go b/op-chain-ops/addresses/contracts.go index 88ab066a52ad0..208dde83b5ae5 100644 --- a/op-chain-ops/addresses/contracts.go +++ b/op-chain-ops/addresses/contracts.go @@ -35,6 +35,7 @@ type ImplementationsContracts struct { OpcmInteropMigratorImpl common.Address OpcmStandardValidatorImpl common.Address OpcmUtilsImpl common.Address + OpcmMigratorImpl common.Address OpcmV2Impl common.Address OpcmContainerImpl common.Address DelayedWethImpl common.Address diff --git a/op-chain-ops/cmd/check-prestate/main.go b/op-chain-ops/cmd/check-prestate/main.go index a3a5864ddf167..47c5477ce3113 100644 --- a/op-chain-ops/cmd/check-prestate/main.go +++ b/op-chain-ops/cmd/check-prestate/main.go @@ -97,7 +97,7 @@ func main() { switch prestateType { case "cannon32", "cannon64", "interop": prestateImpl = prestate.NewOPProgramPrestate() - case "cannon-kona": + case "cannon64-kona": prestateImpl = prestate.NewKonaPrestate() default: log.Crit("Invalid prestate type", "type", prestateType) diff --git a/op-chain-ops/cmd/check-prestate/prestate/opprogram.go b/op-chain-ops/cmd/check-prestate/prestate/opprogram.go index 5360851c2e70d..f6868014f1b38 100644 --- a/op-chain-ops/cmd/check-prestate/prestate/opprogram.go +++ b/op-chain-ops/cmd/check-prestate/prestate/opprogram.go @@ -9,12 +9,13 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/superchain" "golang.org/x/mod/modfile" + "golang.org/x/mod/module" ) const ( monorepoGoModAtTag = 
"https://github.com/ethereum-optimism/optimism/raw/refs/tags/%s/go.mod" superchainRegistryCommitAtRef = "https://github.com/ethereum-optimism/op-geth/raw/%s/superchain-registry-commit.txt" - superchainConfigsZipAtTag = "https://github.com/ethereum-optimism/op-geth/raw/refs/tags/%s/superchain/superchain-configs.zip" + superchainConfigsZipAtRef = "https://github.com/ethereum-optimism/op-geth/raw/%s/superchain/superchain-configs.zip" ) type OPProgramPrestate struct { @@ -38,7 +39,7 @@ func (p *OPProgramPrestate) FindVersions(log log.Logger, prestateVersion string) if err != nil { log.Crit("Failed to fetch go mod", "err", err) } - elVersion := p.findOpGethVersion(log, modFile) + elVersion := resolvePseudoVersion(p.findOpGethVersion(log, modFile)) elCommitInfo = types.NewCommitInfo("ethereum-optimism", "op-geth", elVersion, "optimism", "") registryCommitBytes, err := util.Fetch(fmt.Sprintf(superchainRegistryCommitAtRef, elVersion)) @@ -48,7 +49,7 @@ func (p *OPProgramPrestate) FindVersions(log log.Logger, prestateVersion string) superChainRegistryCommit = strings.TrimSpace(string(registryCommitBytes)) log.Info("Found superchain registry commit info", "commit", superChainRegistryCommit) - prestateConfigData, err := util.Fetch(fmt.Sprintf(superchainConfigsZipAtTag, elVersion)) + prestateConfigData, err := util.Fetch(fmt.Sprintf(superchainConfigsZipAtRef, elVersion)) if err != nil { log.Crit("Failed to fetch prestate's superchain registry config zip", "err", err) } @@ -84,3 +85,17 @@ func fetchMonorepoGoMod(opProgramTag string) (*modfile.File, error) { return modfile.Parse("go.mod", goMod, nil) } + +// resolvePseudoVersion converts a Go module version to a git ref. +// For pseudo-versions like "v1.101604.0-synctest.0.0.20251208094937-ba6bdcfef423", +// it extracts the commit hash suffix. For regular tags, it returns the version as-is. 
+func resolvePseudoVersion(version string) string { + if module.IsPseudoVersion(version) { + rev, err := module.PseudoVersionRev(version) + if err != nil { + log.Crit("Failed to extract commit hash from pseudo-version", "version", version, "err", err) + } + return rev + } + return version +} diff --git a/op-chain-ops/cmd/op-simulate/main.go b/op-chain-ops/cmd/op-simulate/main.go index efe5324946c3a..0d4e1db391b77 100644 --- a/op-chain-ops/cmd/op-simulate/main.go +++ b/op-chain-ops/cmd/op-simulate/main.go @@ -238,6 +238,8 @@ type simChainContext struct { cfg *params.ChainConfig } +var _ core.ChainContext = (*simChainContext)(nil) + func (d *simChainContext) Engine() consensus.Engine { return d.eng } @@ -249,6 +251,24 @@ func (d *simChainContext) GetHeader(h common.Hash, n uint64) *types.Header { panic(fmt.Errorf("header retrieval not supported, cannot fetch %s %d", h, n)) } +func (d *simChainContext) CurrentHeader() *types.Header { + return d.head +} + +func (d *simChainContext) GetHeaderByHash(hash common.Hash) *types.Header { + if d.head.Hash() == hash { + return d.head + } + panic(fmt.Errorf("header retrieval not supported, cannot fetch %s", hash)) +} + +func (d *simChainContext) GetHeaderByNumber(number uint64) *types.Header { + if d.head.Number.Uint64() == number { + return d.head + } + panic(fmt.Errorf("header retrieval not supported, cannot fetch %d", number)) +} + func (d *simChainContext) Config() *params.ChainConfig { return d.cfg } diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index aaaeb32456f91..9937c5dbf4fa1 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -254,6 +254,7 @@ func DeployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme AllowCustomDisputeParameters: true, OperatorFeeScalar: cfg.GasPriceOracleOperatorFeeScalar, OperatorFeeConstant: cfg.GasPriceOracleOperatorFeeConstant, + SuperchainConfig: superDeployment.SuperchainConfigProxy, 
UseCustomGasToken: cfg.UseCustomGasToken, }) if err != nil { diff --git a/op-chain-ops/interopgen/deployments.go b/op-chain-ops/interopgen/deployments.go index 86bde012dc0e2..6a5e0a6143f54 100644 --- a/op-chain-ops/interopgen/deployments.go +++ b/op-chain-ops/interopgen/deployments.go @@ -18,6 +18,7 @@ type Implementations struct { OpcmInteropMigrator common.Address `json:"OPCMInteropMigrator"` OpcmStandardValidator common.Address `json:"OPCMStandardValidator"` OpcmUtils common.Address `json:"OPCMUtils"` + OpcmMigrator common.Address `json:"OPCMMigrator"` OpcmV2 common.Address `json:"OPCMV2"` OpcmContainer common.Address `json:"OPCMContainer"` DelayedWETHImpl common.Address `json:"DelayedWETHImpl"` diff --git a/op-chain-ops/script/deploy.go b/op-chain-ops/script/deploy.go index 9c39121a1db2b..b1d1a18b58de0 100644 --- a/op-chain-ops/script/deploy.go +++ b/op-chain-ops/script/deploy.go @@ -211,9 +211,15 @@ func (b *forgeScriptBackendImpl) Deploy(artifact *foundry.Artifact, label string b.host.AllowCheatcodes(address) // before constructor execution, give our script cheatcode access b.host.state.MakeExcluded(address) // scripts are persistent across forks - // disable contract size constraints + // disable contract size constraints for script deployment + wasNoMaxCodeSize := b.host.noMaxCodeSize b.host.EnforceMaxCodeSize(false) - defer b.host.EnforceMaxCodeSize(true) + defer func() { + // Only re-enable if it wasn't originally disabled + if !wasNoMaxCodeSize { + b.host.EnforceMaxCodeSize(true) + } + }() // deploy the script deployedAddr, err := b.host.Create(deployer, artifact.Bytecode.Object) diff --git a/op-chain-ops/script/script.go b/op-chain-ops/script/script.go index b94c05bc6d0f5..6f4e303591e9b 100644 --- a/op-chain-ops/script/script.go +++ b/op-chain-ops/script/script.go @@ -117,6 +117,9 @@ type Host struct { // useCreate2Deployer uses the Create2Deployer for broadcasted // create2 calls. 
useCreate2Deployer bool + + // noMaxCodeSize disables the maximum contract bytecode size check. + noMaxCodeSize bool } type HostOption func(h *Host) @@ -161,6 +164,15 @@ func WithCreate2Deployer() HostOption { } } +// WithNoMaxCodeSize disables the maximum contract bytecode size check. +// This is useful for development environments where contracts may be compiled +// without optimizations and exceed the standard 24KB limit. +func WithNoMaxCodeSize() HostOption { + return func(h *Host) { + h.noMaxCodeSize = true + } +} + // NewHost creates a Host that can load contracts from the given Artifacts FS, // and with an EVM initialized to the given executionContext. // Optionally src-map loading may be enabled, by providing a non-nil srcFS to read sources from. @@ -298,6 +310,11 @@ func NewHost( h.env = WrapEVM(vm.NewEVM(blockContext, h.state, h.chainCfg, vmCfg)) h.env.SetTxContext(txContext) + // Apply noMaxCodeSize after EVM is initialized + if h.noMaxCodeSize { + h.EnforceMaxCodeSize(false) + } + return h } diff --git a/op-chain-ops/script/script_test.go b/op-chain-ops/script/script_test.go index 814bac637f73f..8bac8afcf8bbe 100644 --- a/op-chain-ops/script/script_test.go +++ b/op-chain-ops/script/script_test.go @@ -472,3 +472,57 @@ func TestScriptErrorHandling(t *testing.T) { }) } } + +func TestWithNoMaxCodeSize(t *testing.T) { + logger := testlog.Logger(t, log.LevelInfo) + af := foundry.OpenArtifactsDir("./testdata/test-artifacts") + scriptContext := DefaultContext + deployer := scriptContext.Sender + + // Create init code that deploys a contract with >24KB runtime code + // Init code structure: + // PUSH2 0x6400 (25600 bytes = 25KB) + // PUSH1 0x0c (offset where runtime code starts) + // PUSH1 0x00 (memory destination) + // CODECOPY + // PUSH2 0x6400 (size to return) + // PUSH1 0x00 (memory offset) + // RETURN + runtimeSize := 25 * 1024 // 25KB runtime code + initCode := []byte{ + 0x61, 0x64, 0x00, // PUSH2 0x6400 + 0x60, 0x0c, // PUSH1 0x0c (12 bytes - length of 
this init code) + 0x60, 0x00, // PUSH1 0x00 + 0x39, // CODECOPY + 0x61, 0x64, 0x00, // PUSH2 0x6400 + 0x60, 0x00, // PUSH1 0x00 + 0xf3, // RETURN + } + // Append runtime code (can be any data, we'll use zeros) + runtimeCode := make([]byte, runtimeSize) + largeBytecode := append(initCode, runtimeCode...) + + t.Run("WithNoMaxCodeSize allows large contracts", func(t *testing.T) { + h := NewHost(logger, af, nil, scriptContext, WithNoMaxCodeSize()) + require.True(t, h.noMaxCodeSize, "noMaxCodeSize flag should be set") + require.True(t, h.env.Config().NoMaxCodeSize, "EVM should have NoMaxCodeSize enabled") + + addr, err := h.Create(deployer, largeBytecode) + require.NoError(t, err, "Should deploy large contract when NoMaxCodeSize is enabled") + require.NotEqual(t, common.Address{}, addr, "Should return valid address") + + // Verify the code was actually deployed + code := h.GetCode(addr) + require.NotEmpty(t, code, "Contract code should be deployed") + }) + + t.Run("Default behavior rejects large contracts", func(t *testing.T) { + h := NewHost(logger, af, nil, scriptContext) + require.False(t, h.noMaxCodeSize, "noMaxCodeSize flag should be false by default") + require.False(t, h.env.Config().NoMaxCodeSize, "EVM should enforce max code size by default") + + _, err := h.Create(deployer, largeBytecode) + require.Error(t, err, "Should reject large contract by default") + require.Contains(t, err.Error(), "max code size", "Error should mention max code size") + }) +} diff --git a/op-chain-ops/script/with.go b/op-chain-ops/script/with.go index 0823bf9b7fbd8..9bfc9dffd692c 100644 --- a/op-chain-ops/script/with.go +++ b/op-chain-ops/script/with.go @@ -44,9 +44,15 @@ func WithScript[B any](h *Host, name string, contract string) (b *B, cleanup fun return nil, nil, fmt.Errorf("failed to make bindings: %w", err) } - // Scripts can be very large + // Scripts can be very large - disable contract size constraints for script deployment + wasNoMaxCodeSize := h.noMaxCodeSize 
h.EnforceMaxCodeSize(false) - defer h.EnforceMaxCodeSize(true) + defer func() { + // Only re-enable if it wasn't originally disabled + if !wasNoMaxCodeSize { + h.EnforceMaxCodeSize(true) + } + }() // deploy the script contract deployedAddr, err := h.Create(deployer, artifact.Bytecode.Object) if err != nil { diff --git a/op-challenger/README.md b/op-challenger/README.md index d217402479b80..cc6af5a908e57 100644 --- a/op-challenger/README.md +++ b/op-challenger/README.md @@ -218,7 +218,7 @@ Prints the list of current claims in a dispute game. * `ROLLUP_RPC` - the RPC endpoint of the L2 consensus client to use * `DATA_DIR` - the directory to use to store data * `PRESTATES_URL` - the base URL to download required prestates from -* `RUN_CONFIG` - the trace providers and prestates to run. e.g. `cannon,asterisc-kona/kona-0.1.0-alpha.5/0x03c50fbef46a05f93ea7665fa89015c2108e10c1b4501799c0663774bd35a9c5` +* `RUN_CONFIG` - the trace providers and prestates to run. e.g. `cannon,cannon-kona/kona-0.1.0-alpha.5/0x03c50fbef46a05f93ea7665fa89015c2108e10c1b4501799c0663774bd35a9c5` Testing utility that continuously runs the specified trace providers against real chain data. The trace providers can be configured with multiple different prestates. This allows testing both the current and potential future prestates with @@ -226,7 +226,7 @@ the fault proofs virtual machine used by the trace provider. The same CLI options as `op-challenger` itself are supported to configure the trace providers. The additional `--run` option allows specifying which prestates to use. The format is `gameType/name/prestateHash` where gameType is the -game type to use with the prestate (e.g cannon or asterisc-kona), name is an arbitrary name for the prestate to use +game type to use with the prestate (e.g cannon or cannon-kona), name is an arbitrary name for the prestate to use when reporting metrics and prestateHash is the hex encoded absolute prestate commitment to use. 
If name is omitted the game type name is used. If the prestateHash is omitted, the absolute prestate hash used for new games on-chain is used. diff --git a/op-challenger/cmd/main_test.go b/op-challenger/cmd/main_test.go index 0647e675f2c93..ae0a710064686 100644 --- a/op-challenger/cmd/main_test.go +++ b/op-challenger/cmd/main_test.go @@ -8,6 +8,7 @@ import ( "time" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum/go-ethereum/superchain" "github.com/stretchr/testify/require" @@ -26,7 +27,7 @@ var ( network = "op-mainnet" testNetwork = "op-sepolia" l2EthRpc = "http://example.com:9545" - supervisorRpc = "http://example.com/supervisor" + superRpc = "http://example.com/super" cannonBin = "./bin/cannon" cannonServer = "./bin/op-program" cannonPreState = "./pre.json" @@ -34,9 +35,6 @@ var ( cannonKonaPreState = "./cannon-kona-pre.json" datadir = "./test_data" rollupRpc = "http://example.com:8555" - asteriscBin = "./bin/asterisc" - asteriscServer = "./bin/op-program" - asteriscPreState = "./pre.json" ) func TestLogLevel(t *testing.T) { @@ -86,6 +84,17 @@ func TestL1ETHRPCAddress(t *testing.T) { }) } +func TestL1ETHRPCKind(t *testing.T) { + t.Run("Valid", func(t *testing.T) { + const kind = sources.RPCKindAlchemy + cfg := configForArgs(t, addRequiredArgs(gameTypes.AlphabetGameType, "--l1-rpc-kind", kind.String())) + require.Equal(t, kind, cfg.L1RPCKind) + }) + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid(t, "invalid value \"bob\" for flag -l1-rpc-kind", addRequiredArgs(gameTypes.AlphabetGameType, "--l1-rpc-kind", "bob")) + }) +} + func TestL1Beacon(t *testing.T) { t.Run("Required", func(t *testing.T) { verifyArgsInvalid(t, "flag l1-beacon is required", addRequiredArgsExcept(gameTypes.AlphabetGameType, "--l1-beacon")) @@ -98,53 +107,44 @@ func TestL1Beacon(t *testing.T) { }) } -func TestOpSupervisor(t *testing.T) { +func TestSuperNodeRpc(t *testing.T) { 
t.Run("RequiredForSuperCannon", func(t *testing.T) { - verifyArgsInvalid(t, "flag supervisor-rpc is required", addRequiredArgsExcept(gameTypes.SuperCannonGameType, "--supervisor-rpc")) + verifyArgsInvalid(t, "flag supernode-rpc is required", addRequiredArgsExcept(gameTypes.SuperCannonGameType, "--supernode-rpc")) }) t.Run("RequiredForSuperPermissioned", func(t *testing.T) { - verifyArgsInvalid(t, "flag supervisor-rpc is required", addRequiredArgsExcept(gameTypes.SuperPermissionedGameType, "--supervisor-rpc")) + verifyArgsInvalid(t, "flag supernode-rpc is required", addRequiredArgsExcept(gameTypes.SuperPermissionedGameType, "--supernode-rpc")) }) t.Run("RequiredForSuperCannonKona", func(t *testing.T) { - verifyArgsInvalid(t, "flag supervisor-rpc is required", addRequiredArgsExcept(gameTypes.SuperCannonKonaGameType, "--supervisor-rpc")) - }) - t.Run("RequiredForSuperAsteriscKona", func(t *testing.T) { - verifyArgsInvalid(t, "flag supervisor-rpc is required", addRequiredArgsExcept(gameTypes.SuperAsteriscKonaGameType, "--supervisor-rpc")) + verifyArgsInvalid(t, "flag supernode-rpc is required", addRequiredArgsExcept(gameTypes.SuperCannonKonaGameType, "--supernode-rpc")) }) for _, gameType := range gameTypes.SupportedGameTypes { gameType := gameType - if gameType == gameTypes.SuperCannonGameType || gameType == gameTypes.SuperPermissionedGameType || gameType == gameTypes.SuperAsteriscKonaGameType || gameType == gameTypes.SuperCannonKonaGameType { + if gameType == gameTypes.SuperCannonGameType || gameType == gameTypes.SuperPermissionedGameType || gameType == gameTypes.SuperCannonKonaGameType { continue } t.Run("NotRequiredForGameType-"+gameType.String(), func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameType, "--supervisor-rpc")) + configForArgs(t, addRequiredArgsExcept(gameType, "--supernode-rpc")) }) } t.Run("Valid-SuperCannon", func(t *testing.T) { - url := "http://localhost/supervisor" - cfg := configForArgs(t, 
addRequiredArgsExcept(gameTypes.SuperCannonGameType, "--supervisor-rpc", "--supervisor-rpc", url)) - require.Equal(t, url, cfg.SupervisorRPC) + url := "http://localhost/super" + cfg := configForArgs(t, addRequiredArgsExcept(gameTypes.SuperCannonGameType, "--supernode-rpc", "--supernode-rpc", url)) + require.Equal(t, url, cfg.SuperRPC) }) t.Run("Valid-SuperPermissioned", func(t *testing.T) { - url := "http://localhost/supervisor" - cfg := configForArgs(t, addRequiredArgsExcept(gameTypes.SuperPermissionedGameType, "--supervisor-rpc", "--supervisor-rpc", url)) - require.Equal(t, url, cfg.SupervisorRPC) + url := "http://localhost/super" + cfg := configForArgs(t, addRequiredArgsExcept(gameTypes.SuperPermissionedGameType, "--supernode-rpc", "--supernode-rpc", url)) + require.Equal(t, url, cfg.SuperRPC) }) t.Run("Valid-SuperCannonKona", func(t *testing.T) { - url := "http://localhost/supervisor" - cfg := configForArgs(t, addRequiredArgsExcept(gameTypes.SuperCannonKonaGameType, "--supervisor-rpc", "--supervisor-rpc", url)) - require.Equal(t, url, cfg.SupervisorRPC) - }) - - t.Run("Valid-SuperAsteriscKona", func(t *testing.T) { - url := "http://localhost/supervisor" - cfg := configForArgs(t, addRequiredArgsExcept(gameTypes.SuperAsteriscKonaGameType, "--supervisor-rpc", "--supervisor-rpc", url)) - require.Equal(t, url, cfg.SupervisorRPC) + url := "http://localhost/super" + cfg := configForArgs(t, addRequiredArgsExcept(gameTypes.SuperCannonKonaGameType, "--supernode-rpc", "--supernode-rpc", url)) + require.Equal(t, url, cfg.SuperRPC) }) } @@ -180,8 +180,8 @@ func TestGameTypes(t *testing.T) { func TestMultipleGameTypes(t *testing.T) { t.Run("WithAllOptions", func(t *testing.T) { argsMap := requiredArgs(gameTypes.CannonGameType) - // Add Asterisc required flags - addRequiredAsteriscArgs(argsMap) + // Add cannon-kona required flags + addRequiredCannonKonaArgs(argsMap) args := toArgList(argsMap) // Add extra game types (cannon is already specified) args = append(args, @@ -189,9 
+189,9 @@ func TestMultipleGameTypes(t *testing.T) { args = append(args, "--game-types", gameTypes.PermissionedGameType.String()) args = append(args, - "--game-types", gameTypes.AsteriscGameType.String()) + "--game-types", gameTypes.CannonKonaGameType.String()) cfg := configForArgs(t, args) - require.Equal(t, []gameTypes.GameType{gameTypes.CannonGameType, gameTypes.AlphabetGameType, gameTypes.PermissionedGameType, gameTypes.AsteriscGameType}, cfg.GameTypes) + require.Equal(t, []gameTypes.GameType{gameTypes.CannonGameType, gameTypes.AlphabetGameType, gameTypes.PermissionedGameType, gameTypes.CannonKonaGameType}, cfg.GameTypes) }) t.Run("WithSomeOptions", func(t *testing.T) { argsMap := requiredArgs(gameTypes.CannonGameType) @@ -367,160 +367,6 @@ func TestMinUpdateInterval(t *testing.T) { }) } -func TestAsteriscOpProgramRequiredArgs(t *testing.T) { - gameType := gameTypes.AsteriscGameType - t.Run(fmt.Sprintf("TestAsteriscServer-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--asterisc-server")) - }) - - t.Run("Required", func(t *testing.T) { - verifyArgsInvalid(t, "flag asterisc-server is required", addRequiredArgsExcept(gameType, "--asterisc-server")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--asterisc-server", "--asterisc-server=./op-program")) - require.Equal(t, "./op-program", cfg.Asterisc.Server) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscAbsolutePrestate-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--asterisc-prestate")) - }) - - t.Run("Required", func(t *testing.T) { - verifyArgsInvalid(t, "flag prestates-url/asterisc-prestates-url or asterisc-prestate is required", addRequiredArgsExcept(gameType, "--asterisc-prestate")) - }) - - t.Run("Valid", func(t 
*testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--asterisc-prestate", "--asterisc-prestate=./pre.json")) - require.Equal(t, "./pre.json", cfg.AsteriscAbsolutePreState) - }) - }) - - t.Run(fmt.Sprintf("TestPrestateBaseURL-%v", gameType), func(t *testing.T) { - allPrestateOptions := []string{"--prestates-url", "--asterisc-prestates-url", "--asterisc-prestate"} - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExceptArr(gameTypes.AlphabetGameType, allPrestateOptions)) - }) - - t.Run("NotRequiredIfAsteriscPrestatesBaseURLSet", func(t *testing.T) { - configForArgs(t, addRequiredArgsExceptArr(gameType, allPrestateOptions, "--asterisc-prestates-url=http://localhost/foo")) - }) - - t.Run("AsteriscPrestatesBaseURLTakesPrecedence", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExceptArr(gameType, allPrestateOptions, "--asterisc-prestates-url=http://localhost/foo", "--prestates-url=http://localhost/bar")) - require.Equal(t, "http://localhost/foo", cfg.AsteriscAbsolutePreStateBaseURL.String()) - }) - - t.Run("RequiredIfAsteriscPrestatesBaseURLNotSet", func(t *testing.T) { - verifyArgsInvalid(t, "flag prestates-url/asterisc-prestates-url or asterisc-prestate is required", addRequiredArgsExceptArr(gameType, allPrestateOptions)) - }) - - t.Run("Invalid", func(t *testing.T) { - verifyArgsInvalid(t, "invalid prestates-url (:foo/bar)", addRequiredArgsExceptArr(gameType, allPrestateOptions, "--prestates-url=:foo/bar")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExceptArr(gameType, allPrestateOptions, "--prestates-url=http://localhost/foo")) - require.Equal(t, "http://localhost/foo", cfg.AsteriscAbsolutePreStateBaseURL.String()) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscAbsolutePrestateBaseURL-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, 
"--asterisc-prestates-url")) - }) - - t.Run("Required", func(t *testing.T) { - verifyArgsInvalid(t, "flag prestates-url/asterisc-prestates-url or asterisc-prestate is required", addRequiredArgsExcept(gameType, "--asterisc-prestate")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--asterisc-prestates-url", "--asterisc-prestates-url=http://localhost/bar")) - require.Equal(t, "http://localhost/bar", cfg.AsteriscAbsolutePreStateBaseURL.String()) - }) - }) -} - -func TestAsteriscKonaRequiredArgs(t *testing.T) { - gameType := gameTypes.AsteriscKonaGameType - t.Run(fmt.Sprintf("TestAsteriscServer-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--asterisc-kona-server")) - }) - - t.Run("Required", func(t *testing.T) { - verifyArgsInvalid(t, "flag asterisc-kona-server is required", addRequiredArgsExcept(gameType, "--asterisc-kona-server")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--asterisc-kona-server", "--asterisc-kona-server=./kona-host")) - require.Equal(t, "./kona-host", cfg.AsteriscKona.Server) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscAbsolutePrestate-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--asterisc-kona-prestate")) - }) - - t.Run("Required", func(t *testing.T) { - verifyArgsInvalid(t, "flag prestates-url/asterisc-kona-prestates-url or asterisc-kona-prestate is required", addRequiredArgsExcept(gameType, "--asterisc-kona-prestate")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--asterisc-kona-prestate", "--asterisc-kona-prestate=./pre.json")) - require.Equal(t, "./pre.json", cfg.AsteriscKonaAbsolutePreState) - }) - }) - - 
t.Run(fmt.Sprintf("TestAsteriscAbsolutePrestateBaseURL-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--asterisc-kona-prestates-url")) - }) - - t.Run("Required", func(t *testing.T) { - verifyArgsInvalid(t, "flag prestates-url/asterisc-kona-prestates-url or asterisc-kona-prestate is required", addRequiredArgsExcept(gameType, "--asterisc-kona-prestate")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--asterisc-kona-prestates-url", "--asterisc-kona-prestates-url=http://localhost/bar")) - require.Equal(t, "http://localhost/bar", cfg.AsteriscKonaAbsolutePreStateBaseURL.String()) - }) - }) - - t.Run(fmt.Sprintf("TestPrestateBaseURL-%v", gameType), func(t *testing.T) { - allPrestateOptions := []string{"--prestates-url", "--asterisc-kona-prestates-url", "--asterisc-kona-prestate"} - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExceptArr(gameTypes.AlphabetGameType, allPrestateOptions)) - }) - - t.Run("NotRequiredIfAsteriscKonaPrestatesBaseURLSet", func(t *testing.T) { - configForArgs(t, addRequiredArgsExceptArr(gameType, allPrestateOptions, "--asterisc-kona-prestates-url=http://localhost/foo")) - }) - - t.Run("AsteriscKonaPrestatesBaseURLTakesPrecedence", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExceptArr(gameType, allPrestateOptions, "--asterisc-kona-prestates-url=http://localhost/foo", "--prestates-url=http://localhost/bar")) - require.Equal(t, "http://localhost/foo", cfg.AsteriscKonaAbsolutePreStateBaseURL.String()) - }) - - t.Run("RequiredIfAsteriscKonaPrestatesBaseURLNotSet", func(t *testing.T) { - verifyArgsInvalid(t, "flag prestates-url/asterisc-kona-prestates-url or asterisc-kona-prestate is required", addRequiredArgsExceptArr(gameType, allPrestateOptions)) - }) - - t.Run("Invalid", func(t *testing.T) { - 
verifyArgsInvalid(t, "invalid prestates-url (:foo/bar)", addRequiredArgsExceptArr(gameType, allPrestateOptions, "--prestates-url=:foo/bar")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExceptArr(gameType, allPrestateOptions, "--prestates-url=http://localhost/foo")) - require.Equal(t, "http://localhost/foo", cfg.AsteriscKonaAbsolutePreStateBaseURL.String()) - }) - }) -} - // validateCustomNetworkFlagsProhibitedWithNetworkFlag ensures custom network flags are not used simultaneously with the network flag. // It validates disallowed flag combinations for a given game type and game type prefix configuration. func validateCustomNetworkFlagsProhibitedWithNetworkFlag(t *testing.T, gameType gameTypes.GameType, gameTypeForFlagPrefix gameTypes.GameType, customNetworkFlag string) { @@ -563,135 +409,6 @@ func validateCustomNetworkFlagsProhibitedWithNetworkFlag(t *testing.T, gameType } } -func TestAsteriscBaseRequiredArgs(t *testing.T) { - for _, gameType := range []gameTypes.GameType{gameTypes.AsteriscGameType, gameTypes.AsteriscKonaGameType} { - gameType := gameType - t.Run(fmt.Sprintf("TestAsteriscBin-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--asterisc-bin")) - }) - - t.Run("Required", func(t *testing.T) { - verifyArgsInvalid(t, "flag asterisc-bin is required", addRequiredArgsExcept(gameType, "--asterisc-bin")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--asterisc-bin", "--asterisc-bin=./asterisc")) - require.Equal(t, "./asterisc", cfg.Asterisc.VmBin) - }) - }) - - t.Run(fmt.Sprintf("TestL2Rpc-%v", gameType), func(t *testing.T) { - t.Run("RequiredForAsteriscTrace", func(t *testing.T) { - verifyArgsInvalid(t, "flag l2-eth-rpc is required", addRequiredArgsExcept(gameType, "--l2-eth-rpc")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := 
configForArgs(t, addRequiredArgs(gameType)) - require.Equal(t, []string{l2EthRpc}, cfg.L2Rpcs) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscSnapshotFreq-%v", gameType), func(t *testing.T) { - t.Run("UsesDefault", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgs(gameType)) - require.Equal(t, config.DefaultAsteriscSnapshotFreq, cfg.Asterisc.SnapshotFreq) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgs(gameType, "--asterisc-snapshot-freq=1234")) - require.Equal(t, uint(1234), cfg.Asterisc.SnapshotFreq) - }) - - t.Run("Invalid", func(t *testing.T) { - verifyArgsInvalid(t, "invalid value \"abc\" for flag -asterisc-snapshot-freq", - addRequiredArgs(gameType, "--asterisc-snapshot-freq=abc")) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscInfoFreq-%v", gameType), func(t *testing.T) { - t.Run("UsesDefault", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgs(gameType)) - require.Equal(t, config.DefaultAsteriscInfoFreq, cfg.Asterisc.InfoFreq) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgs(gameType, "--asterisc-info-freq=1234")) - require.Equal(t, uint(1234), cfg.Asterisc.InfoFreq) - }) - - t.Run("Invalid", func(t *testing.T) { - verifyArgsInvalid(t, "invalid value \"abc\" for flag -asterisc-info-freq", - addRequiredArgs(gameType, "--asterisc-info-freq=abc")) - }) - }) - - t.Run(fmt.Sprintf("TestRequireEitherNetworkOrRollupAndGenesis-%v", gameType), func(t *testing.T) { - verifyArgsInvalid( - t, - fmt.Sprintf("flag network or rollup-config/%s-rollup-config and l2-genesis/%s-l2-genesis is required", gameType, gameType), - addRequiredArgsExcept(gameType, "--network")) - verifyArgsInvalid( - t, - fmt.Sprintf("flag network or rollup-config/%s-rollup-config and l2-genesis/%s-l2-genesis is required", gameType, gameType), - addRequiredArgsExcept(gameType, "--network", "--rollup-config=rollup.json")) - verifyArgsInvalid( - t, - fmt.Sprintf("flag network or 
rollup-config/%s-rollup-config and l2-genesis/%s-l2-genesis is required", gameType, gameType), - addRequiredArgsExcept(gameType, "--network", "--l2-genesis=gensis.json")) - }) - - validateCustomNetworkFlagsProhibitedWithNetworkFlag(t, gameType, gameTypes.AsteriscKonaGameType, "asterisc-kona-l2-custom") - - t.Run(fmt.Sprintf("TestNetwork-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--network")) - }) - - t.Run("NotRequiredWhenRollupAndGenesisSpecified", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameType, "--network", - "--rollup-config=rollup.json", "--l2-genesis=genesis.json")) - }) - - t.Run("NotRequiredWhenNetworkSpecified", func(t *testing.T) { - args := requiredArgs(gameType) - delete(args, "--network") - delete(args, "--game-factory-address") - args["--network"] = "op-sepolia" - cfg := configForArgs(t, toArgList(args)) - require.Equal(t, []string{"op-sepolia"}, cfg.Asterisc.Networks) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--network", "--network", testNetwork)) - require.Equal(t, []string{testNetwork}, cfg.Asterisc.Networks) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscRollupConfig-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--asterisc-rollup-config")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--network", "--rollup-config=rollup.json", "--l2-genesis=genesis.json")) - require.Equal(t, []string{"rollup.json"}, cfg.Asterisc.RollupConfigPaths) - }) - }) - - t.Run(fmt.Sprintf("TestL2Genesis-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--l2-genesis")) - }) - - 
t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--network", "--rollup-config=rollup.json", "--l2-genesis=genesis.json")) - require.Equal(t, []string{"genesis.json"}, cfg.Asterisc.L2GenesisPaths) - }) - }) - } -} - func TestAlphabetRequiredArgs(t *testing.T) { t.Run(fmt.Sprintf("TestL2Rpc-%v", gameTypes.AlphabetGameType), func(t *testing.T) { t.Run("RequiredForAlphabetTrace", func(t *testing.T) { @@ -942,93 +659,6 @@ func TestSuperCannonKonaCustomConfigArgs(t *testing.T) { }) } -func TestSuperAsteriscKonaCustomConfigArgs(t *testing.T) { - for _, gameType := range []gameTypes.GameType{gameTypes.SuperAsteriscKonaGameType} { - gameType := gameType - - t.Run(fmt.Sprintf("TestRequireEitherAsteriscKonaNetworkOrRollupAndGenesisAndDepset-%v", gameType), func(t *testing.T) { - expectedErrorMessage := "flag network or rollup-config/asterisc-kona-rollup-config, l2-genesis/asterisc-kona-l2-genesis and depset-config/asterisc-kona-depset-config is required" - // Missing all - verifyArgsInvalid( - t, - expectedErrorMessage, - addRequiredArgsExcept(gameType, "--network")) - // Missing l2-genesis - verifyArgsInvalid( - t, - expectedErrorMessage, - addRequiredArgsExcept(gameType, "--network", "--asterisc-kona-rollup-config=rollup.json", "--asterisc-kona-depset-config=depset.json")) - // Missing rollup-config - verifyArgsInvalid( - t, - expectedErrorMessage, - addRequiredArgsExcept(gameType, "--network", "--asterisc-kona-l2-genesis=gensis.json", "--asterisc-kona-depset-config=depset.json")) - // Missing depset-config - verifyArgsInvalid( - t, - expectedErrorMessage, - addRequiredArgsExcept(gameType, "--network", "--asterisc-kona-rollup-config=rollup.json", "--asterisc-kona-l2-genesis=gensis.json")) - }) - - validateCustomNetworkFlagsProhibitedWithNetworkFlag(t, gameType, gameTypes.AsteriscKonaGameType, "asterisc-kona-l2-custom") - - t.Run(fmt.Sprintf("TestNetwork-%v", gameType), func(t *testing.T) { - 
t.Run("NotRequiredWhenRollupGenesisAndDepsetIsSpecified", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameType, "--network", - "--asterisc-kona-rollup-config=rollup.json", "--asterisc-kona-l2-genesis=genesis.json", "--asterisc-kona-depset-config=depset.json")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--network", "--network", testNetwork)) - require.Equal(t, []string{testNetwork}, cfg.AsteriscKona.Networks) - }) - }) - - t.Run(fmt.Sprintf("TestSetAsteriscL2ChainId-%v", gameType), func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--network", - "--asterisc-kona-rollup-config=rollup.json", - "--asterisc-kona-l2-genesis=genesis.json", - "--asterisc-kona-depset-config=depset.json", - "--asterisc-kona-l2-custom")) - require.True(t, cfg.AsteriscKona.L2Custom) - }) - - t.Run(fmt.Sprintf("TestAsteriscRollupConfig-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--asterisc-kona-rollup-config")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--network", - "--asterisc-kona-rollup-config=rollup.json", "--asterisc-kona-l2-genesis=genesis.json", "--asterisc-kona-depset-config=depset.json")) - require.Equal(t, []string{"rollup.json"}, cfg.AsteriscKona.RollupConfigPaths) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscL2Genesis-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--asterisc-kona-l2-genesis")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--network", "--asterisc-kona-rollup-config=rollup.json", "--asterisc-kona-l2-genesis=genesis.json", "--asterisc-kona-depset-config=depset.json")) - require.Equal(t, 
[]string{"genesis.json"}, cfg.AsteriscKona.L2GenesisPaths) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscDepsetConfig-%v", gameType), func(t *testing.T) { - t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept(gameTypes.AlphabetGameType, "--asterisc-kona-depset-config")) - }) - - t.Run("Valid", func(t *testing.T) { - cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--network", "--asterisc-kona-rollup-config=rollup.json", "--asterisc-kona-l2-genesis=genesis.json", "--asterisc-kona-depset-config=depset.json")) - require.Equal(t, "depset.json", cfg.AsteriscKona.DepsetConfigPath) - }) - }) - } -} - func TestCannonRequiredArgs(t *testing.T) { for _, gameType := range []gameTypes.GameType{gameTypes.CannonGameType, gameTypes.PermissionedGameType, gameTypes.SuperCannonGameType, gameTypes.SuperPermissionedGameType} { gameType := gameType @@ -1182,12 +812,6 @@ func TestDepsetConfig(t *testing.T) { "flag network or rollup-config/cannon-kona-rollup-config, l2-genesis/cannon-kona-l2-genesis and depset-config/cannon-kona-depset-config is required", addRequiredArgsExcept(gameType, "--network", "--rollup-config=rollup.json", "--l2-genesis=genesis.json")) }) - } else if gameType == gameTypes.SuperAsteriscKonaGameType { - t.Run("Required-"+gameType.String(), func(t *testing.T) { - verifyArgsInvalid(t, - "flag network or rollup-config/asterisc-kona-rollup-config, l2-genesis/asterisc-kona-l2-genesis and depset-config/asterisc-kona-depset-config is required", - addRequiredArgsExcept(gameType, "--network", "--rollup-config=rollup.json", "--l2-genesis=genesis.json")) - }) } else { t.Run("NotRequired-"+gameType.String(), func(t *testing.T) { cfg := configForArgs(t, addRequiredArgsExcept(gameType, "--network", "--rollup-config=rollup.json", "--l2-genesis=genesis.json")) @@ -1216,7 +840,7 @@ func TestRollupRpc(t *testing.T) { for _, gameType := range gameTypes.SupportedGameTypes { gameType := gameType - if gameType == 
gameTypes.SuperCannonGameType || gameType == gameTypes.SuperPermissionedGameType || gameType == gameTypes.SuperAsteriscKonaGameType || gameType == gameTypes.SuperCannonKonaGameType { + if gameType == gameTypes.SuperCannonGameType || gameType == gameTypes.SuperPermissionedGameType || gameType == gameTypes.SuperCannonKonaGameType { t.Run(fmt.Sprintf("NotRequiredFor-%v", gameType), func(t *testing.T) { configForArgs(t, addRequiredArgsExcept(gameType, "--rollup-rpc")) }) @@ -1399,16 +1023,10 @@ func requiredArgs(gameType gameTypes.GameType) map[string]string { addRequiredCannonArgs(args) case gameTypes.CannonKonaGameType: addRequiredCannonKonaArgs(args) - case gameTypes.AsteriscGameType: - addRequiredAsteriscArgs(args) - case gameTypes.AsteriscKonaGameType: - addRequiredAsteriscKonaArgs(args) case gameTypes.SuperCannonGameType, gameTypes.SuperPermissionedGameType: addRequiredSuperCannonArgs(args) case gameTypes.SuperCannonKonaGameType: addRequiredSuperCannonKonaArgs(args) - case gameTypes.SuperAsteriscKonaGameType: - addRequiredSuperAsteriscKonaArgs(args) case gameTypes.OptimisticZKGameType, gameTypes.AlphabetGameType, gameTypes.FastGameType: addRequiredOutputRootArgs(args) } @@ -1417,7 +1035,7 @@ func requiredArgs(gameType gameTypes.GameType) map[string]string { func addRequiredSuperCannonArgs(args map[string]string) { addRequiredCannonBaseArgs(args) - args["--supervisor-rpc"] = supervisorRpc + args["--supernode-rpc"] = superRpc } func addRequiredCannonArgs(args map[string]string) { @@ -1450,28 +1068,7 @@ func addRequiredCannonKonaBaseArgs(args map[string]string) { func addRequiredSuperCannonKonaArgs(args map[string]string) { addRequiredCannonKonaBaseArgs(args) - args["--supervisor-rpc"] = supervisorRpc -} - -func addRequiredAsteriscArgs(args map[string]string) { - addRequiredOutputRootArgs(args) - args["--network"] = network - args["--asterisc-bin"] = asteriscBin - args["--asterisc-server"] = asteriscServer - args["--asterisc-prestate"] = asteriscPreState -} - -func 
addRequiredAsteriscKonaArgs(args map[string]string) { - addRequiredOutputRootArgs(args) - args["--network"] = network - args["--asterisc-bin"] = asteriscBin - args["--asterisc-kona-server"] = asteriscServer - args["--asterisc-kona-prestate"] = asteriscPreState -} - -func addRequiredSuperAsteriscKonaArgs(args map[string]string) { - addRequiredAsteriscKonaArgs(args) - args["--supervisor-rpc"] = supervisorRpc + args["--supernode-rpc"] = superRpc } func toArgList(req map[string]string) []string { diff --git a/op-challenger/cmd/run_trace.go b/op-challenger/cmd/run_trace.go index e2fee2e800f3c..d5bf5b1d2bc72 100644 --- a/op-challenger/cmd/run_trace.go +++ b/op-challenger/cmd/run_trace.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "strings" + "time" "github.com/ethereum-optimism/optimism/op-challenger/flags" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" @@ -43,11 +44,12 @@ func RunTrace(ctx *cli.Context, _ context.CancelCauseFunc) (cliapp.Lifecycle, er runConfigs = append(runConfigs, runner.RunConfig{GameType: gameType}) } } - return runner.NewRunner(logger, cfg, runConfigs), nil + vmTimeout := ctx.Duration(VMTimeoutFlag.Name) + return runner.NewRunner(logger, cfg, runConfigs, vmTimeout), nil } func runTraceFlags() []cli.Flag { - return append(flags.Flags, RunTraceRunFlag) + return append(flags.Flags, RunTraceRunFlag, VMTimeoutFlag) } var RunTraceCommand = &cli.Command{ @@ -58,17 +60,25 @@ var RunTraceCommand = &cli.Command{ Flags: runTraceFlags(), } +const DefaultVMTimeout = 3 * time.Hour + var ( RunTraceRunFlag = &cli.StringSliceFlag{ Name: "run", Usage: "Specify a trace to run. 
Format is gameType/name/prestateHash where " + - "gameType is the game type to use with the prestate (e.g cannon or asterisc-kona), " + + "gameType is the game type to use with the prestate (e.g cannon or cannon-kona), " + "name is an arbitrary name for the prestate to use when reporting metrics and" + "prestateHash is the hex encoded absolute prestate commitment to use. " + "If name is omitted the game type name is used." + "If the prestateHash is omitted, the absolute prestate hash used for new games on-chain.", EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "RUN"), } + VMTimeoutFlag = &cli.DurationFlag{ + Name: "vm-timeout", + Usage: fmt.Sprintf("Maximum duration for VM execution per run. Default is %s. Set to 0 to disable timeout.", DefaultVMTimeout), + EnvVars: opservice.PrefixEnvVar(flags.EnvVarPrefix, "VM_TIMEOUT"), + Value: DefaultVMTimeout, + } ) func parseRunArgs(args []string) ([]runner.RunConfig, error) { diff --git a/op-challenger/cmd/run_trace_test.go b/op-challenger/cmd/run_trace_test.go index a2b5b664446da..bff4d0c66516d 100644 --- a/op-challenger/cmd/run_trace_test.go +++ b/op-challenger/cmd/run_trace_test.go @@ -18,7 +18,7 @@ func TestParseRunArg(t *testing.T) { }{ {arg: "unknown/test1/0x1234", err: gameTypes.ErrUnknownGameType}, {arg: "cannon", expected: runner.RunConfig{GameType: gameTypes.CannonGameType, Name: gameTypes.CannonGameType.String()}}, - {arg: "asterisc", expected: runner.RunConfig{GameType: gameTypes.AsteriscGameType, Name: gameTypes.AsteriscGameType.String()}}, + {arg: "cannon-kona", expected: runner.RunConfig{GameType: gameTypes.CannonKonaGameType, Name: gameTypes.CannonKonaGameType.String()}}, {arg: "cannon/test1", expected: runner.RunConfig{GameType: gameTypes.CannonGameType, Name: "test1"}}, {arg: "cannon/test1/0x1234", expected: runner.RunConfig{GameType: gameTypes.CannonGameType, Name: "test1", Prestate: common.HexToHash("0x1234")}}, {arg: "cannon/test1/0xinvalid", err: ErrInvalidPrestateHash}, diff --git 
a/op-challenger/config/config.go b/op-challenger/config/config.go index fa37498de705e..78c2e0b041bd7 100644 --- a/op-challenger/config/config.go +++ b/op-challenger/config/config.go @@ -12,6 +12,7 @@ import ( gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" "github.com/ethereum-optimism/optimism/op-service/oppprof" + "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum/go-ethereum/common" ) @@ -23,6 +24,7 @@ var ( ErrMissingL2Rpc = errors.New("missing L2 rpc url") ErrMissingCannonAbsolutePreState = errors.New("missing cannon absolute pre-state") ErrMissingL1EthRPC = errors.New("missing l1 eth rpc url") + ErrMissingL1RPCKind = errors.New("missing l1 eth rpc kind") ErrMissingL1Beacon = errors.New("missing l1 beacon url") ErrMissingGameFactoryAddress = errors.New("missing game factory address") ErrMissingCannonSnapshotFreq = errors.New("missing cannon snapshot freq") @@ -32,24 +34,14 @@ var ( ErrMissingCannonKonaInfoFreq = errors.New("missing cannon kona info freq") ErrMissingDepsetConfig = errors.New("missing network or depset config path") - ErrMissingRollupRpc = errors.New("missing rollup rpc url") - ErrMissingSupervisorRpc = errors.New("missing supervisor rpc url") - - ErrMissingAsteriscAbsolutePreState = errors.New("missing asterisc absolute pre-state") - ErrMissingAsteriscSnapshotFreq = errors.New("missing asterisc snapshot freq") - ErrMissingAsteriscInfoFreq = errors.New("missing asterisc info freq") - - ErrMissingAsteriscKonaAbsolutePreState = errors.New("missing asterisc kona absolute pre-state") - ErrMissingAsteriscKonaSnapshotFreq = errors.New("missing asterisc kona snapshot freq") - ErrMissingAsteriscKonaInfoFreq = errors.New("missing asterisc kona info freq") + ErrMissingRollupRpc = errors.New("missing rollup rpc url") + ErrMissingSuperRpc = errors.New("missing super rpc url") ) 
const ( - DefaultPollInterval = time.Second * 12 - DefaultCannonSnapshotFreq = uint(1_000_000_000) - DefaultCannonInfoFreq = uint(10_000_000) - DefaultAsteriscSnapshotFreq = uint(1_000_000_000) - DefaultAsteriscInfoFreq = uint(10_000_000) + DefaultPollInterval = time.Second * 12 + DefaultCannonSnapshotFreq = uint(1_000_000_000) + DefaultCannonInfoFreq = uint(10_000_000) // DefaultGameWindow is the default maximum time duration in the past // that the challenger will look for games to progress. // The default value is 28 days. The worst case duration for a game is 16 days @@ -65,16 +57,17 @@ const ( // This also contains config options for auxiliary services. // It is used to initialize the challenger. type Config struct { - L1EthRpc string // L1 RPC Url - L1Beacon string // L1 Beacon API Url - GameFactoryAddress common.Address // Address of the dispute game factory - GameAllowlist []common.Address // Allowlist of fault game addresses - GameWindow time.Duration // Maximum time duration to look for games to progress - Datadir string // Data Directory - MaxConcurrency uint // Maximum number of threads to use when progressing games - PollInterval time.Duration // Polling interval for latest-block subscription when using an HTTP RPC provider - AllowInvalidPrestate bool // Whether to allow responding to games where the prestate does not match - MinUpdateInterval time.Duration // Minimum duration the L1 head block time must advance before scheduling a new update cycle + L1EthRpc string // L1 RPC Url + L1RPCKind sources.RPCProviderKind // L1 RPC kind + L1Beacon string // L1 Beacon API Url + GameFactoryAddress common.Address // Address of the dispute game factory + GameAllowlist []common.Address // Allowlist of fault game addresses + GameWindow time.Duration // Maximum time duration to look for games to progress + Datadir string // Data Directory + MaxConcurrency uint // Maximum number of threads to use when progressing games + PollInterval time.Duration // Polling interval 
for latest-block subscription when using an HTTP RPC provider + AllowInvalidPrestate bool // Whether to allow responding to games where the prestate does not match + MinUpdateInterval time.Duration // Minimum duration the L1 head block time must advance before scheduling a new update cycle AdditionalBondClaimants []common.Address // List of addresses to claim bonds for in addition to the tx manager sender @@ -82,9 +75,10 @@ type Config struct { GameTypes []gameTypes.GameType // Type of games supported - RollupRpc string // L2 Rollup RPC Url - SupervisorRPC string // L2 supervisor RPC URL - L2Rpcs []string // L2 RPC Url + RollupRpc string // L2 Rollup RPC Url + SuperRPC string // L2 RPC URL for super roots + UseSuperNode bool // Temporary: True to use op-supernode APIs, false for op-supervisor APIs + L2Rpcs []string // L2 RPC Url // Specific to the cannon trace provider Cannon vm.Config @@ -94,14 +88,6 @@ type Config struct { CannonKonaAbsolutePreState string // File to load the absolute pre-state for CannonKona traces from CannonKonaAbsolutePreStateBaseURL *url.URL // Base URL to retrieve absolute pre-states for CannonKona traces from - // Specific to the asterisc trace provider - Asterisc vm.Config - AsteriscAbsolutePreState string // File to load the absolute pre-state for Asterisc traces from - AsteriscAbsolutePreStateBaseURL *url.URL // Base URL to retrieve absolute pre-states for Asterisc traces from - AsteriscKona vm.Config - AsteriscKonaAbsolutePreState string // File to load the absolute pre-state for AsteriscKona traces from - AsteriscKonaAbsolutePreStateBaseURL *url.URL // Base URL to retrieve absolute pre-states for AsteriscKona traces from - MaxPendingTx uint64 // Maximum number of pending transactions (0 == no limit) TxMgrConfig txmgr.CLIConfig @@ -126,15 +112,16 @@ func NewInteropConfig( gameFactoryAddress common.Address, l1EthRpc string, l1BeaconApi string, - supervisorRpc string, + superRpc string, l2Rpcs []string, datadir string, supportedGameTypes 
...gameTypes.GameType, ) Config { return Config{ L1EthRpc: l1EthRpc, + L1RPCKind: sources.RPCKindStandard, L1Beacon: l1BeaconApi, - SupervisorRPC: supervisorRpc, + SuperRPC: superRpc, L2Rpcs: l2Rpcs, GameFactoryAddress: gameFactoryAddress, MaxConcurrency: uint(runtime.NumCPU()), @@ -170,24 +157,6 @@ func NewInteropConfig( DebugInfo: true, BinarySnapshots: true, }, - Asterisc: vm.Config{ - VmType: gameTypes.AsteriscGameType, - L1: l1EthRpc, - L1Beacon: l1BeaconApi, - L2s: l2Rpcs, - SnapshotFreq: DefaultAsteriscSnapshotFreq, - InfoFreq: DefaultAsteriscInfoFreq, - BinarySnapshots: true, - }, - AsteriscKona: vm.Config{ - VmType: gameTypes.AsteriscKonaGameType, - L1: l1EthRpc, - L1Beacon: l1BeaconApi, - L2s: l2Rpcs, - SnapshotFreq: DefaultAsteriscSnapshotFreq, - InfoFreq: DefaultAsteriscInfoFreq, - BinarySnapshots: true, - }, GameWindow: DefaultGameWindow, } } @@ -203,6 +172,7 @@ func NewConfig( ) Config { return Config{ L1EthRpc: l1EthRpc, + L1RPCKind: sources.RPCKindStandard, L1Beacon: l1BeaconApi, RollupRpc: l2RollupRpc, L2Rpcs: []string{l2EthRpc}, @@ -240,24 +210,6 @@ func NewConfig( DebugInfo: true, BinarySnapshots: true, }, - Asterisc: vm.Config{ - VmType: gameTypes.AsteriscGameType, - L1: l1EthRpc, - L1Beacon: l1BeaconApi, - L2s: []string{l2EthRpc}, - SnapshotFreq: DefaultAsteriscSnapshotFreq, - InfoFreq: DefaultAsteriscInfoFreq, - BinarySnapshots: true, - }, - AsteriscKona: vm.Config{ - VmType: gameTypes.AsteriscKonaGameType, - L1: l1EthRpc, - L1Beacon: l1BeaconApi, - L2s: []string{l2EthRpc}, - SnapshotFreq: DefaultAsteriscSnapshotFreq, - InfoFreq: DefaultAsteriscInfoFreq, - BinarySnapshots: true, - }, GameWindow: DefaultGameWindow, } } @@ -270,6 +222,9 @@ func (c Config) Check() error { if c.L1EthRpc == "" { return ErrMissingL1EthRPC } + if c.L1RPCKind == "" { + return ErrMissingL1RPCKind + } if c.L1Beacon == "" { return ErrMissingL1Beacon } @@ -289,8 +244,8 @@ func (c Config) Check() error { return ErrMaxConcurrencyZero } if 
c.GameTypeEnabled(gameTypes.SuperCannonGameType) || c.GameTypeEnabled(gameTypes.SuperPermissionedGameType) { - if c.SupervisorRPC == "" { - return ErrMissingSupervisorRpc + if c.SuperRPC == "" { + return ErrMissingSuperRpc } if len(c.Cannon.Networks) == 0 && c.Cannon.DepsetConfigPath == "" { @@ -309,8 +264,8 @@ func (c Config) Check() error { } } if c.GameTypeEnabled(gameTypes.SuperCannonKonaGameType) { - if c.SupervisorRPC == "" { - return ErrMissingSupervisorRpc + if c.SuperRPC == "" { + return ErrMissingSuperRpc } if len(c.CannonKona.Networks) == 0 && c.CannonKona.DepsetConfigPath == "" { @@ -328,43 +283,6 @@ func (c Config) Check() error { return err } } - if c.GameTypeEnabled(gameTypes.AsteriscGameType) { - if c.RollupRpc == "" { - return ErrMissingRollupRpc - } - if err := c.Asterisc.Check(); err != nil { - return fmt.Errorf("asterisc: %w", err) - } - if c.AsteriscAbsolutePreState == "" && c.AsteriscAbsolutePreStateBaseURL == nil { - return ErrMissingAsteriscAbsolutePreState - } - if c.Asterisc.SnapshotFreq == 0 { - return ErrMissingAsteriscSnapshotFreq - } - if c.Asterisc.InfoFreq == 0 { - return ErrMissingAsteriscInfoFreq - } - } - if c.GameTypeEnabled(gameTypes.AsteriscKonaGameType) { - if c.RollupRpc == "" { - return ErrMissingRollupRpc - } - if err := c.validateBaseAsteriscKonaOptions(); err != nil { - return err - } - } - if c.GameTypeEnabled(gameTypes.SuperAsteriscKonaGameType) { - if c.SupervisorRPC == "" { - return ErrMissingSupervisorRpc - } - - if len(c.AsteriscKona.Networks) == 0 && c.AsteriscKona.DepsetConfigPath == "" { - return ErrMissingDepsetConfig - } - if err := c.validateBaseAsteriscKonaOptions(); err != nil { - return err - } - } if c.GameTypeEnabled(gameTypes.OptimisticZKGameType) { if c.RollupRpc == "" { return ErrMissingRollupRpc @@ -418,19 +336,3 @@ func (c Config) validateBaseCannonKonaOptions() error { } return nil } - -func (c Config) validateBaseAsteriscKonaOptions() error { - if err := c.AsteriscKona.Check(); err != nil { - 
return fmt.Errorf("asterisc kona: %w", err) - } - if c.AsteriscKonaAbsolutePreState == "" && c.AsteriscKonaAbsolutePreStateBaseURL == nil { - return ErrMissingAsteriscKonaAbsolutePreState - } - if c.AsteriscKona.SnapshotFreq == 0 { - return ErrMissingAsteriscKonaSnapshotFreq - } - if c.AsteriscKona.InfoFreq == 0 { - return ErrMissingAsteriscKonaInfoFreq - } - return nil -} diff --git a/op-challenger/config/config_test.go b/op-challenger/config/config_test.go index 86b38b175e177..70485d1f1265d 100644 --- a/op-challenger/config/config_test.go +++ b/op-challenger/config/config_test.go @@ -29,20 +29,9 @@ var ( validDatadir = "/tmp/data" validL2Rpc = "http://localhost:9545" validRollupRpc = "http://localhost:8555" - validSupervisorRpc = "http://localhost/supervisor" + validSuperRpc = "http://localhost/super" - validAsteriscBin = "./bin/asterisc" - validAsteriscOpProgramBin = "./bin/op-program" - validAsteriscNetwork = "mainnet" - validAsteriscAbsolutePreState = "pre.json" - validAsteriscAbsolutePreStateBaseURL, _ = url.Parse("http://localhost/bar/") - - nonExistingFile = "path/to/nonexistent/file" - validAsteriscKonaBin = "./bin/asterisc" - validAsteriscKonaServerBin = "./bin/kona-host" - validAsteriscKonaNetwork = "mainnet" - validAsteriscKonaAbsolutePreState = "pre.json" - validAsteriscKonaAbsolutePreStateBaseURL, _ = url.Parse("http://localhost/bar/") + nonExistingFile = "path/to/nonexistent/file" validCannonKonaBin = "./bin/cannon" validCannonKonaServerBin = "./bin/kona-host" @@ -54,9 +43,6 @@ var singleCannonGameTypes = []gameTypes.GameType{gameTypes.CannonGameType, gameT var superCannonGameTypes = []gameTypes.GameType{gameTypes.SuperCannonGameType, gameTypes.SuperPermissionedGameType} var allCannonGameTypes []gameTypes.GameType var cannonKonaGameTypes = []gameTypes.GameType{gameTypes.CannonKonaGameType, gameTypes.SuperCannonKonaGameType} -var asteriscGameTypes = []gameTypes.GameType{gameTypes.AsteriscGameType} -var asteriscKonaGameTypes = 
[]gameTypes.GameType{gameTypes.AsteriscKonaGameType} -var superAsteriscKonaGameTypes = []gameTypes.GameType{gameTypes.SuperAsteriscKonaGameType} func init() { allCannonGameTypes = append(allCannonGameTypes, singleCannonGameTypes...) @@ -80,7 +66,7 @@ func ensureExists(path string) error { } func applyValidConfigForSuperCannon(t *testing.T, cfg *Config) { - cfg.SupervisorRPC = validSupervisorRpc + cfg.SuperRPC = validSuperRpc applyValidConfigForCannon(t, cfg) } @@ -98,34 +84,6 @@ func applyValidConfigForCannon(t *testing.T, cfg *Config) { cfg.Cannon.Networks = []string{validCannonNetwork} } -func applyValidConfigForAsterisc(t *testing.T, cfg *Config) { - tmpDir := t.TempDir() - vmBin := filepath.Join(tmpDir, validAsteriscBin) - server := filepath.Join(tmpDir, validAsteriscOpProgramBin) - err := ensureExists(vmBin) - require.NoError(t, err) - err = ensureExists(server) - require.NoError(t, err) - cfg.Asterisc.VmBin = vmBin - cfg.Asterisc.Server = server - cfg.AsteriscAbsolutePreStateBaseURL = validAsteriscAbsolutePreStateBaseURL - cfg.Asterisc.Networks = []string{validAsteriscNetwork} -} - -func applyValidConfigForAsteriscKona(t *testing.T, cfg *Config) { - tmpDir := t.TempDir() - vmBin := filepath.Join(tmpDir, validAsteriscKonaBin) - server := filepath.Join(tmpDir, validAsteriscKonaServerBin) - err := ensureExists(vmBin) - require.NoError(t, err) - err = ensureExists(server) - require.NoError(t, err) - cfg.AsteriscKona.VmBin = vmBin - cfg.AsteriscKona.Server = server - cfg.AsteriscKonaAbsolutePreStateBaseURL = validAsteriscKonaAbsolutePreStateBaseURL - cfg.AsteriscKona.Networks = []string{validAsteriscKonaNetwork} -} - func applyValidConfigForCannonKona(t *testing.T, cfg *Config) { tmpDir := t.TempDir() vmBin := filepath.Join(tmpDir, validCannonKonaBin) @@ -141,15 +99,10 @@ func applyValidConfigForCannonKona(t *testing.T, cfg *Config) { } func applyValidConfigForSuperCannonKona(t *testing.T, cfg *Config) { - cfg.SupervisorRPC = validSupervisorRpc + cfg.SuperRPC = 
validSuperRpc applyValidConfigForCannonKona(t, cfg) } -func applyValidConfigForSuperAsteriscKona(t *testing.T, cfg *Config) { - cfg.SupervisorRPC = validSupervisorRpc - applyValidConfigForAsteriscKona(t, cfg) -} - func applyValidConfigForOptimisticZK(cfg *Config) { cfg.RollupRpc = validRollupRpc } @@ -168,15 +121,6 @@ func validConfig(t *testing.T, gameType gameTypes.GameType) Config { if gameType == gameTypes.SuperCannonKonaGameType { applyValidConfigForSuperCannonKona(t, &cfg) } - if gameType == gameTypes.AsteriscGameType { - applyValidConfigForAsterisc(t, &cfg) - } - if gameType == gameTypes.AsteriscKonaGameType { - applyValidConfigForAsteriscKona(t, &cfg) - } - if gameType == gameTypes.SuperAsteriscKonaGameType { - applyValidConfigForSuperAsteriscKona(t, &cfg) - } if gameType == gameTypes.OptimisticZKGameType { applyValidConfigForOptimisticZK(&cfg) } @@ -199,12 +143,6 @@ func validConfigWithNoNetworks(t *testing.T, gameType gameTypes.GameType) Config if slices.Contains(cannonKonaGameTypes, gameType) { mutateVmConfig(&cfg.CannonKona) } - if slices.Contains(asteriscGameTypes, gameType) { - mutateVmConfig(&cfg.Asterisc) - } - if slices.Contains(asteriscKonaGameTypes, gameType) { - mutateVmConfig(&cfg.AsteriscKona) - } return cfg } @@ -233,6 +171,12 @@ func TestL1EthRpcRequired(t *testing.T) { require.ErrorIs(t, config.Check(), ErrMissingL1EthRPC) } +func TestL1EthRpcKindRequired(t *testing.T) { + config := validConfig(t, gameTypes.CannonGameType) + config.L1RPCKind = "" + require.ErrorIs(t, config.Check(), ErrMissingL1RPCKind) +} + func TestL1BeaconRequired(t *testing.T) { config := validConfig(t, gameTypes.CannonGameType) config.L1Beacon = "" @@ -518,18 +462,6 @@ func TestDepsetConfig(t *testing.T) { }) } - for _, gameType := range superAsteriscKonaGameTypes { - gameType := gameType - t.Run(fmt.Sprintf("TestAsteriscNetworkOrDepsetConfigRequired-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.AsteriscKona.Networks = nil - 
cfg.AsteriscKona.RollupConfigPaths = []string{"foo.json"} - cfg.AsteriscKona.L2GenesisPaths = []string{"genesis.json"} - cfg.AsteriscKona.DepsetConfigPath = "" - require.ErrorIs(t, cfg.Check(), ErrMissingDepsetConfig) - }) - } - for _, gameType := range singleCannonGameTypes { gameType := gameType t.Run(fmt.Sprintf("TestDepsetConfigNotRequired-%v", gameType), func(t *testing.T) { @@ -542,243 +474,6 @@ func TestDepsetConfig(t *testing.T) { require.NoError(t, cfg.Check()) }) } - - for _, gameType := range asteriscKonaGameTypes { - gameType := gameType - t.Run(fmt.Sprintf("TestDepsetConfigNotRequired-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.AsteriscKona.Networks = nil - cfg.AsteriscKona.RollupConfigPaths = []string{"foo.json"} - cfg.AsteriscKona.L1GenesisPath = "bar.json" - cfg.AsteriscKona.L2GenesisPaths = []string{"genesis.json"} - cfg.AsteriscKona.DepsetConfigPath = "" - require.NoError(t, cfg.Check()) - }) - } -} - -func TestAsteriscRequiredArgs(t *testing.T) { - for _, gameType := range asteriscGameTypes { - gameType := gameType - - t.Run(fmt.Sprintf("TestAsteriscBinRequired-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.Asterisc.VmBin = "" - require.ErrorIs(t, config.Check(), vm.ErrMissingBin) - }) - - t.Run(fmt.Sprintf("TestAsteriscServerRequired-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.Asterisc.Server = "" - require.ErrorIs(t, config.Check(), vm.ErrMissingServer) - }) - - t.Run(fmt.Sprintf("TestAsteriscAbsolutePreStateOrBaseURLRequired-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.AsteriscAbsolutePreState = "" - config.AsteriscAbsolutePreStateBaseURL = nil - require.ErrorIs(t, config.Check(), ErrMissingAsteriscAbsolutePreState) - }) - - t.Run(fmt.Sprintf("TestAsteriscAbsolutePreState-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.AsteriscAbsolutePreState = 
validAsteriscAbsolutePreState - config.AsteriscAbsolutePreStateBaseURL = nil - require.NoError(t, config.Check()) - }) - - t.Run(fmt.Sprintf("TestAsteriscAbsolutePreStateBaseURL-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.AsteriscAbsolutePreState = "" - config.AsteriscAbsolutePreStateBaseURL = validAsteriscAbsolutePreStateBaseURL - require.NoError(t, config.Check()) - }) - - t.Run(fmt.Sprintf("TestAllowSupplingBothAsteriscAbsolutePreStateAndBaseURL-%v", gameType), func(t *testing.T) { - // Since the prestate base URL might be inherited from the --prestate-urls option, allow overriding it with a specific prestate - config := validConfig(t, gameType) - config.AsteriscAbsolutePreState = validAsteriscAbsolutePreState - config.AsteriscAbsolutePreStateBaseURL = validAsteriscAbsolutePreStateBaseURL - require.NoError(t, config.Check()) - }) - - t.Run(fmt.Sprintf("TestL2RpcRequired-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.L2Rpcs = nil - require.ErrorIs(t, config.Check(), ErrMissingL2Rpc) - }) - - t.Run(fmt.Sprintf("TestAsteriscSnapshotFreq-%v", gameType), func(t *testing.T) { - t.Run("MustNotBeZero", func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.Asterisc.SnapshotFreq = 0 - require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscSnapshotFreq) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscInfoFreq-%v", gameType), func(t *testing.T) { - t.Run("MustNotBeZero", func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.Asterisc.InfoFreq = 0 - require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscInfoFreq) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscNetworkOrRollupConfigRequired-%v", gameType), func(t *testing.T) { - cfg := validConfigWithNoNetworks(t, gameType) - cfg.Asterisc.RollupConfigPaths = nil - require.ErrorIs(t, cfg.Check(), vm.ErrMissingRollupConfig) - }) - - t.Run(fmt.Sprintf("TestAsteriscNetworkOrL2GenesisRequired-%v", gameType), func(t *testing.T) { - cfg := 
validConfigWithNoNetworks(t, gameType) - cfg.Asterisc.L2GenesisPaths = nil - require.ErrorIs(t, cfg.Check(), vm.ErrMissingL2Genesis) - }) - - t.Run(fmt.Sprintf("MaySpecifyNetworkAndCustomConfigs-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.Asterisc.Networks = []string{validAsteriscNetwork} - cfg.Asterisc.RollupConfigPaths = []string{"foo.json"} - cfg.Asterisc.L2GenesisPaths = []string{"genesis.json"} - require.NoError(t, cfg.Check()) - }) - - t.Run(fmt.Sprintf("TestNetworkMustBeValid-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.Asterisc.Networks = []string{"unknown"} - require.ErrorIs(t, cfg.Check(), vm.ErrNetworkUnknown) - }) - - t.Run(fmt.Sprintf("TestDebugInfoDisabled-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - require.False(t, cfg.Asterisc.DebugInfo) - }) - - t.Run(fmt.Sprintf("TestVMBinExists-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.Asterisc.VmBin = nonExistingFile - require.ErrorIs(t, cfg.Check(), vm.ErrMissingBin) - }) - - t.Run(fmt.Sprintf("TestServerExists-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.Asterisc.Server = nonExistingFile - require.ErrorIs(t, cfg.Check(), vm.ErrMissingServer) - }) - } -} - -func TestAsteriscKonaRequiredArgs(t *testing.T) { - for _, gameType := range asteriscKonaGameTypes { - gameType := gameType - - t.Run(fmt.Sprintf("TestAsteriscKonaBinRequired-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.AsteriscKona.VmBin = "" - require.ErrorIs(t, config.Check(), vm.ErrMissingBin) - }) - - t.Run(fmt.Sprintf("TestAsteriscKonaServerRequired-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.AsteriscKona.Server = "" - require.ErrorIs(t, config.Check(), vm.ErrMissingServer) - }) - - t.Run(fmt.Sprintf("TestAsteriscKonaAbsolutePreStateOrBaseURLRequired-%v", gameType), func(t *testing.T) { - config := validConfig(t, 
gameType) - config.AsteriscKonaAbsolutePreState = "" - config.AsteriscKonaAbsolutePreStateBaseURL = nil - require.ErrorIs(t, config.Check(), ErrMissingAsteriscKonaAbsolutePreState) - }) - - t.Run(fmt.Sprintf("TestAsteriscKonaAbsolutePreState-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.AsteriscKonaAbsolutePreState = validAsteriscKonaAbsolutePreState - config.AsteriscKonaAbsolutePreStateBaseURL = nil - require.NoError(t, config.Check()) - }) - - t.Run(fmt.Sprintf("TestAsteriscKonaAbsolutePreStateBaseURL-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.AsteriscKonaAbsolutePreState = "" - config.AsteriscKonaAbsolutePreStateBaseURL = validAsteriscKonaAbsolutePreStateBaseURL - require.NoError(t, config.Check()) - }) - - t.Run(fmt.Sprintf("TestAllowSupplyingBothAsteriscKonaAbsolutePreStateAndBaseURL-%v", gameType), func(t *testing.T) { - // Since the prestate base URL might be inherited from the --prestate-urls option, allow overriding it with a specific prestate - config := validConfig(t, gameType) - config.AsteriscKonaAbsolutePreState = validAsteriscKonaAbsolutePreState - config.AsteriscKonaAbsolutePreStateBaseURL = validAsteriscKonaAbsolutePreStateBaseURL - require.NoError(t, config.Check()) - }) - - t.Run(fmt.Sprintf("TestL2RpcRequired-%v", gameType), func(t *testing.T) { - config := validConfig(t, gameType) - config.L2Rpcs = nil - require.ErrorIs(t, config.Check(), ErrMissingL2Rpc) - }) - - t.Run(fmt.Sprintf("TestAsteriscKonaSnapshotFreq-%v", gameType), func(t *testing.T) { - t.Run("MustNotBeZero", func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.AsteriscKona.SnapshotFreq = 0 - require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscKonaSnapshotFreq) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscKonaInfoFreq-%v", gameType), func(t *testing.T) { - t.Run("MustNotBeZero", func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.AsteriscKona.InfoFreq = 0 - require.ErrorIs(t, cfg.Check(), 
ErrMissingAsteriscKonaInfoFreq) - }) - }) - - t.Run(fmt.Sprintf("TestAsteriscKonaNetworkOrRollupConfigRequired-%v", gameType), func(t *testing.T) { - cfg := validConfigWithNoNetworks(t, gameType) - cfg.AsteriscKona.RollupConfigPaths = nil - require.ErrorIs(t, cfg.Check(), vm.ErrMissingRollupConfig) - }) - - t.Run(fmt.Sprintf("TestAsteriscKonaNetworkOrL2GenesisRequired-%v", gameType), func(t *testing.T) { - cfg := validConfigWithNoNetworks(t, gameType) - cfg.AsteriscKona.L2GenesisPaths = nil - require.ErrorIs(t, cfg.Check(), vm.ErrMissingL2Genesis) - }) - - t.Run(fmt.Sprintf("MaySpecifyNetworkAndCustomConfig-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.AsteriscKona.Networks = []string{validAsteriscKonaNetwork} - cfg.AsteriscKona.RollupConfigPaths = []string{"foo.json"} - cfg.AsteriscKona.L2GenesisPaths = []string{"genesis.json"} - require.NoError(t, cfg.Check()) - }) - - t.Run(fmt.Sprintf("TestNetworkMustBeValid-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.AsteriscKona.Networks = []string{"unknown"} - require.ErrorIs(t, cfg.Check(), vm.ErrNetworkUnknown) - }) - - t.Run(fmt.Sprintf("TestDebugInfoDisabled-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - require.False(t, cfg.AsteriscKona.DebugInfo) - }) - - t.Run(fmt.Sprintf("TestVMBinExists-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.AsteriscKona.VmBin = nonExistingFile - require.ErrorIs(t, cfg.Check(), vm.ErrMissingBin) - }) - - t.Run(fmt.Sprintf("TestServerExists-%v", gameType), func(t *testing.T) { - cfg := validConfig(t, gameType) - cfg.AsteriscKona.Server = nonExistingFile - require.ErrorIs(t, cfg.Check(), vm.ErrMissingServer) - }) - } } func TestDatadirRequired(t *testing.T) { @@ -810,7 +505,7 @@ func TestHttpPollInterval(t *testing.T) { func TestRollupRpcRequired(t *testing.T) { for _, gameType := range gameTypes.SupportedGameTypes { gameType := gameType - if gameType == 
gameTypes.SuperCannonGameType || gameType == gameTypes.SuperPermissionedGameType || gameType == gameTypes.SuperAsteriscKonaGameType || gameType == gameTypes.SuperCannonKonaGameType { + if gameType == gameTypes.SuperCannonGameType || gameType == gameTypes.SuperPermissionedGameType || gameType == gameTypes.SuperCannonKonaGameType { continue } t.Run(gameType.String(), func(t *testing.T) { @@ -839,27 +534,21 @@ func TestRollupRpcNotRequiredForInterop(t *testing.T) { config.RollupRpc = "" require.NoError(t, config.Check()) }) - - t.Run("SuperAsteriscKona", func(t *testing.T) { - config := validConfig(t, gameTypes.SuperAsteriscKonaGameType) - config.RollupRpc = "" - require.NoError(t, config.Check()) - }) } -func TestSupervisorRpc(t *testing.T) { +func TestSuperRpc(t *testing.T) { for _, gameType := range gameTypes.SupportedGameTypes { gameType := gameType - if gameType == gameTypes.SuperCannonGameType || gameType == gameTypes.SuperPermissionedGameType || gameType == gameTypes.SuperAsteriscKonaGameType || gameType == gameTypes.SuperCannonKonaGameType { + if gameType == gameTypes.SuperCannonGameType || gameType == gameTypes.SuperPermissionedGameType || gameType == gameTypes.SuperCannonKonaGameType { t.Run("RequiredFor"+gameType.String(), func(t *testing.T) { config := validConfig(t, gameType) - config.SupervisorRPC = "" - require.ErrorIs(t, config.Check(), ErrMissingSupervisorRpc) + config.SuperRPC = "" + require.ErrorIs(t, config.Check(), ErrMissingSuperRpc) }) } else { t.Run("NotRequiredFor"+gameType.String(), func(t *testing.T) { config := validConfig(t, gameType) - config.SupervisorRPC = "" + config.SuperRPC = "" require.NoError(t, config.Check()) }) } @@ -884,29 +573,11 @@ func TestRequireConfigForMultipleGameTypesForCannon(t *testing.T) { require.ErrorIs(t, cfg.Check(), ErrMissingRollupRpc) } -func TestRequireConfigForMultipleGameTypesForAsterisc(t *testing.T) { - cfg := validConfig(t, gameTypes.AsteriscGameType) - cfg.GameTypes = 
[]gameTypes.GameType{gameTypes.AsteriscGameType, gameTypes.AlphabetGameType} - // Set all required options and check its valid - cfg.RollupRpc = validRollupRpc - require.NoError(t, cfg.Check()) - - // Require asterisc specific args - cfg.AsteriscAbsolutePreState = "" - cfg.AsteriscAbsolutePreStateBaseURL = nil - require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscAbsolutePreState) - cfg.AsteriscAbsolutePreState = validAsteriscAbsolutePreState - - // Require output asterisc specific args - cfg.RollupRpc = "" - require.ErrorIs(t, cfg.Check(), ErrMissingRollupRpc) -} - -func TestRequireConfigForMultipleGameTypesForCannonAndAsterisc(t *testing.T) { +func TestRequireConfigForMultipleGameTypesForCannonAndCannonKona(t *testing.T) { cfg := validConfig(t, gameTypes.CannonGameType) - applyValidConfigForAsterisc(t, &cfg) + applyValidConfigForCannonKona(t, &cfg) - cfg.GameTypes = []gameTypes.GameType{gameTypes.CannonGameType, gameTypes.AsteriscGameType, gameTypes.AlphabetGameType, gameTypes.FastGameType} + cfg.GameTypes = []gameTypes.GameType{gameTypes.CannonGameType, gameTypes.CannonKonaGameType, gameTypes.AlphabetGameType, gameTypes.FastGameType} // Set all required options and check its valid cfg.RollupRpc = validRollupRpc require.NoError(t, cfg.Check()) @@ -920,18 +591,15 @@ func TestRequireConfigForMultipleGameTypesForCannonAndAsterisc(t *testing.T) { require.NoError(t, err) cfg.Cannon.VmBin = vmBin - // Require asterisc specific args - cfg.AsteriscAbsolutePreState = "" - cfg.AsteriscAbsolutePreStateBaseURL = nil - require.ErrorIs(t, cfg.Check(), ErrMissingAsteriscAbsolutePreState) - cfg.AsteriscAbsolutePreState = validAsteriscAbsolutePreState + // Require cannon-kona specific args + cfg.CannonKonaAbsolutePreState = "" + cfg.CannonKonaAbsolutePreStateBaseURL = nil + require.ErrorIs(t, cfg.Check(), ErrMissingCannonKonaAbsolutePreState) + cfg.CannonKonaAbsolutePreStateBaseURL = validCannonKonaAbsolutePreStateBaseURL - cfg.Asterisc.Server = "" + cfg.CannonKona.Server = "" 
require.ErrorIs(t, cfg.Check(), vm.ErrMissingServer) - server := filepath.Join(tmpDir, validAsteriscOpProgramBin) - err = ensureExists(server) - require.NoError(t, err) - cfg.Asterisc.Server = server + cfg.CannonKona.Server = vmBin // Check final config is valid require.NoError(t, cfg.Check()) diff --git a/op-challenger/flags/flags.go b/op-challenger/flags/flags.go index d5626fab85514..bf758ca9141d1 100644 --- a/op-challenger/flags/flags.go +++ b/op-challenger/flags/flags.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" "github.com/ethereum-optimism/optimism/op-service/flags" + "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/superchain" @@ -40,11 +41,8 @@ var ( faultDisputeVMs = []gameTypes.GameType{ gameTypes.CannonGameType, gameTypes.CannonKonaGameType, - gameTypes.AsteriscGameType, - gameTypes.AsteriscKonaGameType, gameTypes.SuperCannonGameType, gameTypes.SuperCannonKonaGameType, - gameTypes.SuperAsteriscKonaGameType, } // Required Flags L1EthRpcFlag = &cli.StringFlag{ @@ -52,15 +50,25 @@ var ( Usage: "HTTP provider URL for L1.", EnvVars: prefixEnvVars("L1_ETH_RPC"), } + L1RPCProviderKind = &cli.GenericFlag{ + Name: "l1-rpc-kind", + Usage: "The kind of RPC provider, used to inform optimal transactions receipts fetching, and thus reduce costs. 
Valid options: " + + openum.EnumString(sources.RPCProviderKinds), + EnvVars: prefixEnvVars("L1_RPC_KIND"), + Value: func() *sources.RPCProviderKind { + out := sources.RPCKindStandard + return &out + }(), + } L1BeaconFlag = &cli.StringFlag{ Name: "l1-beacon", Usage: "Address of L1 Beacon API endpoint to use", EnvVars: prefixEnvVars("L1_BEACON"), } - SupervisorRpcFlag = &cli.StringFlag{ - Name: "supervisor-rpc", - Usage: "Provider URL for supervisor RPC", - EnvVars: prefixEnvVars("SUPERVISOR_RPC"), + SuperNodeRpcFlag = &cli.StringFlag{ + Name: "supernode-rpc", + Usage: "Provider URL for supernode roots", + EnvVars: prefixEnvVars("SUPERNODE_RPC"), } RollupRpcFlag = &cli.StringFlag{ Name: "rollup-rpc", @@ -109,7 +117,7 @@ var ( } L2ExperimentalEthRpcFlag = &cli.StringFlag{ Name: "l2-experimental-eth-rpc", - Usage: "L2 Address of L2 JSON-RPC endpoint to use (eth and debug namespace required with execution witness support) (cannon/asterisc game type only)", + Usage: "L2 Address of L2 JSON-RPC endpoint to use (eth and debug namespace required with execution witness support) (cannon game type only)", EnvVars: prefixEnvVars("L2_EXPERIMENTAL_ETH_RPC"), } MaxPendingTransactionsFlag = &cli.Uint64Flag{ @@ -224,51 +232,6 @@ var ( Value: false, Hidden: true, } - AsteriscBinFlag = &cli.StringFlag{ - Name: "asterisc-bin", - Usage: "Path to asterisc executable to use when generating trace data (asterisc game type only)", - EnvVars: prefixEnvVars("ASTERISC_BIN"), - } - AsteriscServerFlag = &cli.StringFlag{ - Name: "asterisc-server", - Usage: "Path to executable to use as pre-image oracle server when generating trace data (asterisc game type only)", - EnvVars: prefixEnvVars("ASTERISC_SERVER"), - } - AsteriscKonaServerFlag = &cli.StringFlag{ - Name: "asterisc-kona-server", - Usage: "Path to kona executable to use as pre-image oracle server when generating trace data (asterisc-kona game type only)", - EnvVars: prefixEnvVars("ASTERISC_KONA_SERVER"), - } - AsteriscKonaL2CustomFlag = 
&cli.BoolFlag{ - Name: "asterisc-kona-l2-custom", - Usage: "Notify the kona-host that the L2 chain uses custom config to be loaded via the preimage oracle. " + - "WARNING: This is incompatible with on-chain testing and must only be used for testing purposes.", - EnvVars: prefixEnvVars("ASTERISC_KONA_L2_CUSTOM"), - Value: false, - Hidden: true, - } - AsteriscPreStateFlag = &cli.StringFlag{ - Name: "asterisc-prestate", - Usage: "Path to absolute prestate to use when generating trace data (asterisc game type only)", - EnvVars: prefixEnvVars("ASTERISC_PRESTATE"), - } - AsteriscKonaPreStateFlag = &cli.StringFlag{ - Name: "asterisc-kona-prestate", - Usage: "Path to absolute prestate to use when generating trace data (asterisc-kona game type only)", - EnvVars: prefixEnvVars("ASTERISC_KONA_PRESTATE"), - } - AsteriscSnapshotFreqFlag = &cli.UintFlag{ - Name: "asterisc-snapshot-freq", - Usage: "Frequency of asterisc snapshots to generate in VM steps (asterisc game type only)", - EnvVars: prefixEnvVars("ASTERISC_SNAPSHOT_FREQ"), - Value: config.DefaultAsteriscSnapshotFreq, - } - AsteriscInfoFreqFlag = &cli.UintFlag{ - Name: "asterisc-info-freq", - Usage: "Frequency of asterisc info log messages to generate in VM steps (asterisc game type only)", - EnvVars: prefixEnvVars("ASTERISC_INFO_FREQ"), - Value: config.DefaultAsteriscInfoFreq, - } GameWindowFlag = &cli.DurationFlag{ Name: "game-window", Usage: "The time window which the challenger will look for games to progress and claim bonds. 
" + @@ -310,12 +273,13 @@ var requiredFlags = []cli.Flag{ // optionalFlags is a list of unchecked cli flags var optionalFlags = []cli.Flag{ + L1RPCProviderKind, RollupRpcFlag, NetworkFlag, FactoryAddressFlag, GameTypesFlag, MaxConcurrencyFlag, - SupervisorRpcFlag, + SuperNodeRpcFlag, L2EthRpcFlag, L2ExperimentalEthRpcFlag, MaxPendingTransactionsFlag, @@ -332,14 +296,6 @@ var optionalFlags = []cli.Flag{ CannonKonaServerFlag, CannonKonaPreStateFlag, CannonKonaL2CustomFlag, - AsteriscBinFlag, - AsteriscServerFlag, - AsteriscKonaL2CustomFlag, - AsteriscKonaServerFlag, - AsteriscPreStateFlag, - AsteriscKonaPreStateFlag, - AsteriscSnapshotFreqFlag, - AsteriscInfoFreqFlag, GameWindowFlag, SelectiveClaimResolutionFlag, UnsafeAllowInvalidPrestate, @@ -394,8 +350,8 @@ func CheckCannonBaseFlags(ctx *cli.Context) error { } func CheckSuperCannonFlags(ctx *cli.Context) error { - if !ctx.IsSet(SupervisorRpcFlag.Name) { - return fmt.Errorf("flag %v is required", SupervisorRpcFlag.Name) + if !ctx.IsSet(SuperNodeRpcFlag.Name) { + return fmt.Errorf("flag %v is required", SuperNodeRpcFlag.Name) } if !ctx.IsSet(flags.NetworkFlagName) && !(RollupConfigFlag.IsSet(ctx, gameTypes.CannonGameType) && L2GenesisFlag.IsSet(ctx, gameTypes.CannonGameType) && DepsetConfigFlag.IsSet(ctx, gameTypes.CannonGameType)) { @@ -412,8 +368,8 @@ func CheckSuperCannonFlags(ctx *cli.Context) error { } func CheckSuperCannonKonaFlags(ctx *cli.Context) error { - if !ctx.IsSet(SupervisorRpcFlag.Name) { - return fmt.Errorf("flag %v is required", SupervisorRpcFlag.Name) + if !ctx.IsSet(SuperNodeRpcFlag.Name) { + return fmt.Errorf("flag %v is required", SuperNodeRpcFlag.Name) } if !ctx.IsSet(flags.NetworkFlagName) && !(RollupConfigFlag.IsSet(ctx, gameTypes.CannonKonaGameType) && L2GenesisFlag.IsSet(ctx, gameTypes.CannonKonaGameType) && DepsetConfigFlag.IsSet(ctx, gameTypes.CannonKonaGameType)) { @@ -477,79 +433,6 @@ func CheckCannonKonaFlags(ctx *cli.Context) error { return nil } -func CheckAsteriscBaseFlags(ctx 
*cli.Context, gameType gameTypes.GameType) error { - if !ctx.IsSet(flags.NetworkFlagName) && - !(RollupConfigFlag.IsSet(ctx, gameType) && L2GenesisFlag.IsSet(ctx, gameType)) { - return fmt.Errorf("flag %v or %v and %v is required", - flags.NetworkFlagName, RollupConfigFlag.EitherFlagName(gameType), L2GenesisFlag.EitherFlagName(gameType)) - } - if ctx.IsSet(flags.NetworkFlagName) && - (RollupConfigFlag.IsSet(ctx, gameTypes.AsteriscKonaGameType) || L2GenesisFlag.IsSet(ctx, gameTypes.AsteriscKonaGameType) || L1GenesisFlag.IsSet(ctx, gameTypes.AsteriscKonaGameType) || ctx.Bool(AsteriscKonaL2CustomFlag.Name)) { - return fmt.Errorf("flag %v can not be used with %v, %v, %v or %v", - flags.NetworkFlagName, RollupConfigFlag.EitherFlagName(gameTypes.AsteriscKonaGameType), L2GenesisFlag.EitherFlagName(gameTypes.AsteriscKonaGameType), L1GenesisFlag.EitherFlagName(gameTypes.AsteriscKonaGameType), AsteriscKonaL2CustomFlag.Name) - } - if !ctx.IsSet(AsteriscBinFlag.Name) { - return fmt.Errorf("flag %s is required", AsteriscBinFlag.Name) - } - return nil -} - -func CheckAsteriscFlags(ctx *cli.Context) error { - if err := checkOutputProviderFlags(ctx); err != nil { - return err - } - if err := CheckAsteriscBaseFlags(ctx, gameTypes.AsteriscGameType); err != nil { - return err - } - if !ctx.IsSet(AsteriscServerFlag.Name) { - return fmt.Errorf("flag %s is required", AsteriscServerFlag.Name) - } - if !PreStatesURLFlag.IsSet(ctx, gameTypes.AsteriscGameType) && !ctx.IsSet(AsteriscPreStateFlag.Name) { - return fmt.Errorf("flag %s or %s is required", PreStatesURLFlag.EitherFlagName(gameTypes.AsteriscGameType), AsteriscPreStateFlag.Name) - } - return nil -} - -func CheckAsteriscKonaFlags(ctx *cli.Context) error { - if err := checkOutputProviderFlags(ctx); err != nil { - return err - } - if err := CheckAsteriscBaseFlags(ctx, gameTypes.AsteriscKonaGameType); err != nil { - return err - } - if !ctx.IsSet(AsteriscKonaServerFlag.Name) { - return fmt.Errorf("flag %s is required", 
AsteriscKonaServerFlag.Name) - } - if !PreStatesURLFlag.IsSet(ctx, gameTypes.AsteriscKonaGameType) && !ctx.IsSet(AsteriscKonaPreStateFlag.Name) { - return fmt.Errorf("flag %s or %s is required", PreStatesURLFlag.EitherFlagName(gameTypes.AsteriscKonaGameType), AsteriscKonaPreStateFlag.Name) - } - return nil -} - -func CheckSuperAsteriscKonaFlags(ctx *cli.Context) error { - if !ctx.IsSet(SupervisorRpcFlag.Name) { - return fmt.Errorf("flag %v is required", SupervisorRpcFlag.Name) - } - if !ctx.IsSet(flags.NetworkFlagName) && - !(RollupConfigFlag.IsSet(ctx, gameTypes.AsteriscKonaGameType) && L2GenesisFlag.IsSet(ctx, gameTypes.AsteriscKonaGameType) && DepsetConfigFlag.IsSet(ctx, gameTypes.AsteriscKonaGameType)) { - return fmt.Errorf("flag %v or %v, %v and %v is required", - flags.NetworkFlagName, - RollupConfigFlag.EitherFlagName(gameTypes.AsteriscKonaGameType), - L2GenesisFlag.EitherFlagName(gameTypes.AsteriscKonaGameType), - DepsetConfigFlag.EitherFlagName(gameTypes.AsteriscKonaGameType)) - } - if err := CheckAsteriscBaseFlags(ctx, gameTypes.AsteriscKonaGameType); err != nil { - return err - } - if !ctx.IsSet(AsteriscKonaServerFlag.Name) { - return fmt.Errorf("flag %s is required", AsteriscKonaServerFlag.Name) - } - if !PreStatesURLFlag.IsSet(ctx, gameTypes.AsteriscKonaGameType) && !ctx.IsSet(AsteriscKonaPreStateFlag.Name) { - return fmt.Errorf("flag %s or %s is required", PreStatesURLFlag.EitherFlagName(gameTypes.AsteriscKonaGameType), AsteriscKonaPreStateFlag.Name) - } - return nil -} - func CheckRequired(ctx *cli.Context, types []gameTypes.GameType) error { for _, f := range requiredFlags { if !ctx.IsSet(f.Names()[0]) { @@ -569,14 +452,6 @@ func CheckRequired(ctx *cli.Context, types []gameTypes.GameType) error { if err := CheckCannonKonaFlags(ctx); err != nil { return err } - case gameTypes.AsteriscGameType: - if err := CheckAsteriscFlags(ctx); err != nil { - return err - } - case gameTypes.AsteriscKonaGameType: - if err := CheckAsteriscKonaFlags(ctx); err != nil { 
- return err - } case gameTypes.SuperCannonGameType, gameTypes.SuperPermissionedGameType: if err := CheckSuperCannonFlags(ctx); err != nil { return err @@ -585,10 +460,6 @@ func CheckRequired(ctx *cli.Context, types []gameTypes.GameType) error { if err := CheckSuperCannonKonaFlags(ctx); err != nil { return err } - case gameTypes.SuperAsteriscKonaGameType: - if err := CheckSuperAsteriscKonaFlags(ctx); err != nil { - return err - } case gameTypes.OptimisticZKGameType, gameTypes.AlphabetGameType, gameTypes.FastGameType: if err := checkOutputProviderFlags(ctx); err != nil { return err @@ -722,14 +593,6 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro if err != nil { return nil, err } - asteriscPreStatesURL, err := getPrestatesUrl(gameTypes.AsteriscGameType) - if err != nil { - return nil, err - } - asteriscKonaPreStatesURL, err := getPrestatesUrl(gameTypes.AsteriscKonaGameType) - if err != nil { - return nil, err - } networks := ctx.StringSlice(flags.NetworkFlagName) l1EthRpc := ctx.String(L1EthRpcFlag.Name) l1Beacon := ctx.String(L1BeaconFlag.Name) @@ -738,6 +601,7 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro return &config.Config{ // Required Flags L1EthRpc: l1EthRpc, + L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.String(L1RPCProviderKind.Name))), L1Beacon: l1Beacon, GameTypes: enabledGameTypes, GameFactoryAddress: gameFactoryAddress, @@ -750,7 +614,7 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro MinUpdateInterval: ctx.Duration(MinUpdateInterval.Name), AdditionalBondClaimants: claimants, RollupRpc: ctx.String(RollupRpcFlag.Name), - SupervisorRPC: ctx.String(SupervisorRpcFlag.Name), + SuperRPC: ctx.String(SuperNodeRpcFlag.Name), Cannon: vm.Config{ VmType: gameTypes.CannonGameType, L1: l1EthRpc, @@ -794,51 +658,12 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro CannonKonaAbsolutePreState: 
ctx.String(CannonKonaPreStateFlag.Name), CannonKonaAbsolutePreStateBaseURL: cannonKonaPreStatesURL, Datadir: ctx.String(DatadirFlag.Name), - Asterisc: vm.Config{ - VmType: gameTypes.AsteriscGameType, - L1: l1EthRpc, - L1Beacon: l1Beacon, - L2s: l2Rpcs, - L2Experimental: l2Experimental, - VmBin: ctx.String(AsteriscBinFlag.Name), - Server: ctx.String(AsteriscServerFlag.Name), - Networks: networks, - RollupConfigPaths: RollupConfigFlag.StringSlice(ctx, gameTypes.AsteriscGameType), - L1GenesisPath: L1GenesisFlag.String(ctx, gameTypes.AsteriscGameType), - L2GenesisPaths: L2GenesisFlag.StringSlice(ctx, gameTypes.AsteriscGameType), - DepsetConfigPath: DepsetConfigFlag.String(ctx, gameTypes.AsteriscGameType), - SnapshotFreq: ctx.Uint(AsteriscSnapshotFreqFlag.Name), - InfoFreq: ctx.Uint(AsteriscInfoFreqFlag.Name), - BinarySnapshots: true, - }, - AsteriscAbsolutePreState: ctx.String(AsteriscPreStateFlag.Name), - AsteriscAbsolutePreStateBaseURL: asteriscPreStatesURL, - AsteriscKona: vm.Config{ - VmType: gameTypes.AsteriscKonaGameType, - L1: l1EthRpc, - L1Beacon: l1Beacon, - L2s: l2Rpcs, - L2Experimental: l2Experimental, - VmBin: ctx.String(AsteriscBinFlag.Name), - Server: ctx.String(AsteriscKonaServerFlag.Name), - Networks: networks, - L2Custom: ctx.Bool(AsteriscKonaL2CustomFlag.Name), - RollupConfigPaths: RollupConfigFlag.StringSlice(ctx, gameTypes.AsteriscKonaGameType), - L1GenesisPath: L1GenesisFlag.String(ctx, gameTypes.AsteriscKonaGameType), - L2GenesisPaths: L2GenesisFlag.StringSlice(ctx, gameTypes.AsteriscKonaGameType), - DepsetConfigPath: DepsetConfigFlag.String(ctx, gameTypes.AsteriscKonaGameType), - SnapshotFreq: ctx.Uint(AsteriscSnapshotFreqFlag.Name), - InfoFreq: ctx.Uint(AsteriscInfoFreqFlag.Name), - BinarySnapshots: true, - }, - AsteriscKonaAbsolutePreState: ctx.String(AsteriscKonaPreStateFlag.Name), - AsteriscKonaAbsolutePreStateBaseURL: asteriscKonaPreStatesURL, - TxMgrConfig: txMgrConfig, - MetricsConfig: metricsConfig, - PprofConfig: pprofConfig, - 
SelectiveClaimResolution: ctx.Bool(SelectiveClaimResolutionFlag.Name), - AllowInvalidPrestate: ctx.Bool(UnsafeAllowInvalidPrestate.Name), - ResponseDelay: ctx.Duration(ResponseDelayFlag.Name), - ResponseDelayAfter: ctx.Uint64(ResponseDelayAfterFlag.Name), + TxMgrConfig: txMgrConfig, + MetricsConfig: metricsConfig, + PprofConfig: pprofConfig, + SelectiveClaimResolution: ctx.Bool(SelectiveClaimResolutionFlag.Name), + AllowInvalidPrestate: ctx.Bool(UnsafeAllowInvalidPrestate.Name), + ResponseDelay: ctx.Duration(ResponseDelayFlag.Name), + ResponseDelayAfter: ctx.Uint64(ResponseDelayAfterFlag.Name), }, nil } diff --git a/op-challenger/game/client/noop_sync.go b/op-challenger/game/client/noop_sync.go new file mode 100644 index 0000000000000..f6e5ffbc0cc61 --- /dev/null +++ b/op-challenger/game/client/noop_sync.go @@ -0,0 +1,16 @@ +package client + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type NoopSyncStatusValidator struct{} + +func (n *NoopSyncStatusValidator) ValidateNodeSynced(_ context.Context, _ eth.BlockID) error { + return nil +} + +var _ types.SyncValidator = (*NoopSyncStatusValidator)(nil) diff --git a/op-challenger/game/client/provider.go b/op-challenger/game/client/provider.go index d827cae977f4e..f4ba49adc3776 100644 --- a/op-challenger/game/client/provider.go +++ b/op-challenger/game/client/provider.go @@ -2,10 +2,11 @@ package client import ( "context" - "errors" "fmt" "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/dial" "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/sources/batching" @@ -13,29 +14,29 @@ import ( "github.com/ethereum/go-ethereum/log" ) -var ErrNotInSync = errors.New("local node too 
far behind") - type Provider struct { ctx context.Context logger log.Logger cfg *config.Config - l1Client *ethclient.Client + l1Client *sources.L1Client caller *batching.MultiCaller - l2EL *ethclient.Client - rollupClient *sources.RollupClient - syncValidator *RollupSyncStatusValidator - supervisorClient *sources.SupervisorClient - toClose []func() + l2EL *ethclient.Client + rollupClient *sources.RollupClient + syncValidator *RollupSyncStatusValidator + supervisorClient *sources.SupervisorClient + superSyncValidator types.SyncValidator + superNodeClient *sources.SuperNodeClient + toClose []func() } -func NewProvider(ctx context.Context, logger log.Logger, cfg *config.Config, l1Client *ethclient.Client) *Provider { +func NewProvider(ctx context.Context, logger log.Logger, cfg *config.Config, l1Client *sources.L1Client, rpcClient client.RPC) *Provider { return &Provider{ ctx: ctx, logger: logger, cfg: cfg, l1Client: l1Client, - caller: batching.NewMultiCaller(l1Client.Client(), batching.DefaultBatchSize), + caller: batching.NewMultiCaller(rpcClient, batching.DefaultBatchSize), } } @@ -45,7 +46,7 @@ func (c *Provider) Close() { } } -func (c *Provider) L1Client() *ethclient.Client { +func (c *Provider) L1Client() *sources.L1Client { return c.l1Client } @@ -96,12 +97,26 @@ func (c *Provider) RollupClients() (*sources.RollupClient, *RollupSyncStatusVali return rollupClient, c.syncValidator, nil } -func (c *Provider) SuperchainClients() (*sources.SupervisorClient, *SupervisorSyncValidator, error) { - supervisorClient, err := dial.DialSupervisorClientWithTimeout(c.ctx, c.logger, c.cfg.SupervisorRPC) - if err != nil { - return nil, nil, fmt.Errorf("failed to dial supervisor: %w", err) +func (c *Provider) SuperchainClients() (*sources.SupervisorClient, *sources.SuperNodeClient, types.SyncValidator, error) { + if c.supervisorClient != nil || c.superNodeClient != nil { + return c.supervisorClient, c.superNodeClient, c.superSyncValidator, nil + } + if c.cfg.UseSuperNode { + 
superNodeClient, err := dial.DialSuperNodeClientWithTimeout(c.ctx, c.logger, c.cfg.SuperRPC) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to dial supernode: %w", err) + } + c.superNodeClient = superNodeClient + c.superSyncValidator = &NoopSyncStatusValidator{} + c.toClose = append(c.toClose, superNodeClient.Close) + } else { + supervisorClient, err := dial.DialSupervisorClientWithTimeout(c.ctx, c.logger, c.cfg.SuperRPC) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to dial supervisor: %w", err) + } + c.supervisorClient = supervisorClient + c.superSyncValidator = NewSupervisorSyncValidator(supervisorClient) + c.toClose = append(c.toClose, supervisorClient.Close) } - c.supervisorClient = supervisorClient - c.toClose = append(c.toClose, supervisorClient.Close) - return supervisorClient, NewSupervisorSyncValidator(supervisorClient), nil + return c.supervisorClient, c.superNodeClient, c.superSyncValidator, nil } diff --git a/op-challenger/game/client/rollup_sync.go b/op-challenger/game/client/rollup_sync.go index 2b0d47c221483..495814339753f 100644 --- a/op-challenger/game/client/rollup_sync.go +++ b/op-challenger/game/client/rollup_sync.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/ethereum-optimism/optimism/op-challenger/game/types" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -27,7 +28,7 @@ func (s *RollupSyncStatusValidator) ValidateNodeSynced(ctx context.Context, game return fmt.Errorf("failed to retrieve local node sync status: %w", err) } if syncStatus.CurrentL1.Number <= gameL1Head.Number { - return fmt.Errorf("%w require L1 block above %v but at %v", ErrNotInSync, gameL1Head.Number, syncStatus.CurrentL1.Number) + return fmt.Errorf("%w require L1 block above %v but at %v", types.ErrNotInSync, gameL1Head.Number, syncStatus.CurrentL1.Number) } return nil } diff --git a/op-challenger/game/client/rollup_sync_test.go b/op-challenger/game/client/rollup_sync_test.go index 8914a1956f465..138d718b04843 100644 --- 
a/op-challenger/game/client/rollup_sync_test.go +++ b/op-challenger/game/client/rollup_sync_test.go @@ -5,6 +5,7 @@ import ( "errors" "testing" + "github.com/ethereum-optimism/optimism/op-challenger/game/types" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/stretchr/testify/require" ) @@ -34,7 +35,7 @@ func TestSyncStatusProvider(t *testing.T) { }, }, statusReqErr: nil, - expected: ErrNotInSync, + expected: types.ErrNotInSync, }, { name: "CurrentL1EqualToGameL1Head", @@ -45,7 +46,7 @@ func TestSyncStatusProvider(t *testing.T) { }, }, statusReqErr: nil, - expected: ErrNotInSync, + expected: types.ErrNotInSync, }, { name: "CurrentL1AboveGameL1Head", diff --git a/op-challenger/game/client/supervisor.go b/op-challenger/game/client/supervisor_sync.go similarity index 80% rename from op-challenger/game/client/supervisor.go rename to op-challenger/game/client/supervisor_sync.go index f4a57ed251918..b38147e73db4b 100644 --- a/op-challenger/game/client/supervisor.go +++ b/op-challenger/game/client/supervisor_sync.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/ethereum-optimism/optimism/op-challenger/game/types" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -27,7 +28,7 @@ func (s SupervisorSyncValidator) ValidateNodeSynced(ctx context.Context, gameL1H return fmt.Errorf("failed to retrieve sync status: %w", err) } if syncStatus.MinSyncedL1.Number <= gameL1Head.Number { - return fmt.Errorf("%w require L1 block above %v but at %v", ErrNotInSync, gameL1Head.Number, syncStatus.MinSyncedL1.Number) + return fmt.Errorf("%w require L1 block above %v but at %v", types.ErrNotInSync, gameL1Head.Number, syncStatus.MinSyncedL1.Number) } return nil } diff --git a/op-challenger/game/client/supervisor_test.go b/op-challenger/game/client/supervisor_sync_test.go similarity index 92% rename from op-challenger/game/client/supervisor_test.go rename to op-challenger/game/client/supervisor_sync_test.go index 44bf124af2766..64d6f2bcc831b 100644 --- 
a/op-challenger/game/client/supervisor_test.go +++ b/op-challenger/game/client/supervisor_sync_test.go @@ -5,6 +5,7 @@ import ( "errors" "testing" + "github.com/ethereum-optimism/optimism/op-challenger/game/types" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/stretchr/testify/require" ) @@ -30,7 +31,7 @@ func TestSupervisorSyncStatusProvider(t *testing.T) { syncStatus: eth.SupervisorSyncStatus{ MinSyncedL1: eth.L1BlockRef{Number: 99}, }, - expectedError: ErrNotInSync, + expectedError: types.ErrNotInSync, }, { name: "MinSyncedL1EqualToGameHead", @@ -38,7 +39,7 @@ func TestSupervisorSyncStatusProvider(t *testing.T) { syncStatus: eth.SupervisorSyncStatus{ MinSyncedL1: eth.L1BlockRef{Number: 100}, }, - expectedError: ErrNotInSync, + expectedError: types.ErrNotInSync, }, { name: "InSync", diff --git a/op-challenger/game/fault/contracts/detect.go b/op-challenger/game/fault/contracts/detect.go index 9093e88ebc1cb..3cdaf9d5ed847 100644 --- a/op-challenger/game/fault/contracts/detect.go +++ b/op-challenger/game/fault/contracts/detect.go @@ -32,14 +32,11 @@ func DetectGameType(ctx context.Context, addr common.Address, caller *batching.M case gameTypes.CannonGameType, gameTypes.PermissionedGameType, gameTypes.CannonKonaGameType, - gameTypes.AsteriscGameType, gameTypes.AlphabetGameType, gameTypes.FastGameType, - gameTypes.AsteriscKonaGameType, gameTypes.SuperCannonGameType, gameTypes.SuperPermissionedGameType, - gameTypes.SuperCannonKonaGameType, - gameTypes.SuperAsteriscKonaGameType: + gameTypes.SuperCannonKonaGameType: return gameType, nil default: return gameTypes.UnknownGameType, fmt.Errorf("unsupported game type: %d", gameType) diff --git a/op-challenger/game/fault/contracts/disputegame.go b/op-challenger/game/fault/contracts/disputegame.go index 611e0e0cf604d..0a2f688d9723a 100644 --- a/op-challenger/game/fault/contracts/disputegame.go +++ b/op-challenger/game/fault/contracts/disputegame.go @@ -40,16 +40,14 @@ func NewDisputeGameContractForGame(ctx 
context.Context, metrics metrics.Contract func NewDisputeGameContract(ctx context.Context, metrics metrics.ContractMetricer, caller *batching.MultiCaller, gameType gameTypes.GameType, addr common.Address) (DisputeGameContract, error) { switch gameType { - case gameTypes.SuperCannonGameType, gameTypes.SuperCannonKonaGameType, gameTypes.SuperPermissionedGameType, gameTypes.SuperAsteriscKonaGameType: + case gameTypes.SuperCannonGameType, gameTypes.SuperCannonKonaGameType, gameTypes.SuperPermissionedGameType: return NewSuperFaultDisputeGameContract(ctx, metrics, addr, caller) case gameTypes.CannonGameType, gameTypes.PermissionedGameType, gameTypes.CannonKonaGameType, - gameTypes.AsteriscGameType, gameTypes.AlphabetGameType, - gameTypes.FastGameType, - gameTypes.AsteriscKonaGameType: + gameTypes.FastGameType: return NewPreInteropFaultDisputeGameContract(ctx, metrics, addr, caller) case gameTypes.OptimisticZKGameType: return NewOptimisticZKDisputeGameContract(metrics, addr, caller) diff --git a/op-challenger/game/fault/contracts/faultdisputegame.go b/op-challenger/game/fault/contracts/faultdisputegame.go index d6239633c71f1..1408f7e615899 100644 --- a/op-challenger/game/fault/contracts/faultdisputegame.go +++ b/op-challenger/game/fault/contracts/faultdisputegame.go @@ -81,7 +81,7 @@ func NewFaultDisputeGameContract(ctx context.Context, metrics metrics.ContractMe return nil, fmt.Errorf("failed to detect game type: %w", err) } switch gameType { - case gameTypes.SuperCannonGameType, gameTypes.SuperCannonKonaGameType, gameTypes.SuperPermissionedGameType, gameTypes.SuperAsteriscKonaGameType: + case gameTypes.SuperCannonGameType, gameTypes.SuperCannonKonaGameType, gameTypes.SuperPermissionedGameType: return NewSuperFaultDisputeGameContract(ctx, metrics, addr, caller) default: return NewPreInteropFaultDisputeGameContract(ctx, metrics, addr, caller) diff --git a/op-challenger/game/fault/contracts/faultdisputegame_test.go 
b/op-challenger/game/fault/contracts/faultdisputegame_test.go index 9b3948deb5362..4ab4938a740bd 100644 --- a/op-challenger/game/fault/contracts/faultdisputegame_test.go +++ b/op-challenger/game/fault/contracts/faultdisputegame_test.go @@ -52,7 +52,7 @@ func (c contractVersion) String() string { } func (c contractVersion) IsSuperGame() bool { - return c.gameType == gameTypes.SuperCannonGameType || c.gameType == gameTypes.SuperPermissionedGameType || c.gameType == gameTypes.SuperAsteriscKonaGameType + return c.gameType == gameTypes.SuperCannonGameType || c.gameType == gameTypes.SuperPermissionedGameType } const ( diff --git a/op-challenger/game/fault/register.go b/op-challenger/game/fault/register.go index 4373a371159b0..89c13b8e672ae 100644 --- a/op-challenger/game/fault/register.go +++ b/op-challenger/game/fault/register.go @@ -68,18 +68,18 @@ func RegisterGameTypes( registerTasks = append(registerTasks, NewCannonKonaRegisterTask(gameTypes.CannonKonaGameType, cfg, m, vm.NewKonaExecutor(), l2HeaderSource, rollupClient, syncValidator)) } if cfg.GameTypeEnabled(gameTypes.SuperCannonGameType) { - rootProvider, syncValidator, err := clients.SuperchainClients() + rootProvider, superNodeProvider, syncValidator, err := clients.SuperchainClients() if err != nil { return err } - registerTasks = append(registerTasks, NewSuperCannonRegisterTask(gameTypes.SuperCannonGameType, cfg, m, vm.NewOpProgramServerExecutor(logger), rootProvider, syncValidator)) + registerTasks = append(registerTasks, NewSuperCannonRegisterTask(gameTypes.SuperCannonGameType, cfg, m, vm.NewOpProgramServerExecutor(logger), rootProvider, superNodeProvider, syncValidator)) } if cfg.GameTypeEnabled(gameTypes.SuperCannonKonaGameType) { - rootProvider, syncValidator, err := clients.SuperchainClients() + rootProvider, superNodeProvider, syncValidator, err := clients.SuperchainClients() if err != nil { return err } - registerTasks = append(registerTasks, 
NewSuperCannonKonaRegisterTask(gameTypes.SuperCannonKonaGameType, cfg, m, vm.NewKonaSuperExecutor(), rootProvider, syncValidator)) + registerTasks = append(registerTasks, NewSuperCannonKonaRegisterTask(gameTypes.SuperCannonKonaGameType, cfg, m, vm.NewKonaSuperExecutor(), rootProvider, superNodeProvider, syncValidator)) } if cfg.GameTypeEnabled(gameTypes.PermissionedGameType) { l2HeaderSource, rollupClient, syncValidator, err := clients.SingleChainClients() @@ -89,32 +89,11 @@ func RegisterGameTypes( registerTasks = append(registerTasks, NewCannonRegisterTask(gameTypes.PermissionedGameType, cfg, m, vm.NewOpProgramServerExecutor(logger), l2HeaderSource, rollupClient, syncValidator)) } if cfg.GameTypeEnabled(gameTypes.SuperPermissionedGameType) { - rootProvider, syncValidator, err := clients.SuperchainClients() + rootProvider, superNodeProvider, syncValidator, err := clients.SuperchainClients() if err != nil { return err } - registerTasks = append(registerTasks, NewSuperCannonRegisterTask(gameTypes.SuperPermissionedGameType, cfg, m, vm.NewOpProgramServerExecutor(logger), rootProvider, syncValidator)) - } - if cfg.GameTypeEnabled(gameTypes.AsteriscGameType) { - l2HeaderSource, rollupClient, syncValidator, err := clients.SingleChainClients() - if err != nil { - return err - } - registerTasks = append(registerTasks, NewAsteriscRegisterTask(gameTypes.AsteriscGameType, cfg, m, vm.NewOpProgramServerExecutor(logger), l2HeaderSource, rollupClient, syncValidator)) - } - if cfg.GameTypeEnabled(gameTypes.AsteriscKonaGameType) { - l2HeaderSource, rollupClient, syncValidator, err := clients.SingleChainClients() - if err != nil { - return err - } - registerTasks = append(registerTasks, NewAsteriscKonaRegisterTask(gameTypes.AsteriscKonaGameType, cfg, m, vm.NewKonaExecutor(), l2HeaderSource, rollupClient, syncValidator)) - } - if cfg.GameTypeEnabled(gameTypes.SuperAsteriscKonaGameType) { - rootProvider, syncValidator, err := clients.SuperchainClients() - if err != nil { - return err 
- } - registerTasks = append(registerTasks, NewSuperAsteriscKonaRegisterTask(gameTypes.SuperAsteriscKonaGameType, cfg, m, vm.NewKonaSuperExecutor(), rootProvider, syncValidator)) + registerTasks = append(registerTasks, NewSuperCannonRegisterTask(gameTypes.SuperPermissionedGameType, cfg, m, vm.NewOpProgramServerExecutor(logger), rootProvider, superNodeProvider, syncValidator)) } if cfg.GameTypeEnabled(gameTypes.FastGameType) { l2HeaderSource, rollupClient, syncValidator, err := clients.SingleChainClients() diff --git a/op-challenger/game/fault/register_task.go b/op-challenger/game/fault/register_task.go index dbd10bd2fa47f..b1921d5a46be4 100644 --- a/op-challenger/game/fault/register_task.go +++ b/op-challenger/game/fault/register_task.go @@ -12,7 +12,6 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/alphabet" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/asterisc" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/outputs" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/prestates" @@ -26,6 +25,7 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/metrics" "github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/sources/batching" "github.com/ethereum-optimism/optimism/op-service/sources/caching" "github.com/ethereum/go-ethereum/common" @@ -36,7 +36,7 @@ type RegisterTask struct { gameType gameTypes.GameType skipPrestateValidation bool - syncValidator generic.SyncValidator + syncValidator gameTypes.SyncValidator getTopPrestateProvider func(ctx context.Context, 
prestateBlock uint64) (faultTypes.PrestateProvider, error) getBottomPrestateProvider func(ctx context.Context, prestateHash common.Hash) (faultTypes.PrestateProvider, error) @@ -52,12 +52,12 @@ type RegisterTask struct { poststateBlock uint64) (*trace.Accessor, error) } -func NewSuperCannonRegisterTask(gameType gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, rootProvider super.RootProvider, syncValidator generic.SyncValidator) *RegisterTask { - return newSuperCannonVMRegisterTaskWithConfig(gameType, cfg, m, serverExecutor, rootProvider, syncValidator, cfg.Cannon, cfg.CannonAbsolutePreStateBaseURL, cfg.CannonAbsolutePreState) +func NewSuperCannonRegisterTask(gameType gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, rootProvider *sources.SupervisorClient, superNodeProvider *sources.SuperNodeClient, syncValidator gameTypes.SyncValidator) *RegisterTask { + return newSuperCannonVMRegisterTaskWithConfig(gameType, cfg, m, serverExecutor, rootProvider, superNodeProvider, syncValidator, cfg.Cannon, cfg.CannonAbsolutePreStateBaseURL, cfg.CannonAbsolutePreState) } -func NewSuperCannonKonaRegisterTask(gameType gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, rootProvider super.RootProvider, syncValidator generic.SyncValidator) *RegisterTask { - return newSuperCannonVMRegisterTaskWithConfig(gameType, cfg, m, serverExecutor, rootProvider, syncValidator, cfg.CannonKona, cfg.CannonKonaAbsolutePreStateBaseURL, cfg.CannonKonaAbsolutePreState) +func NewSuperCannonKonaRegisterTask(gameType gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, rootProvider *sources.SupervisorClient, superNodeProvider *sources.SuperNodeClient, syncValidator gameTypes.SyncValidator) *RegisterTask { + return newSuperCannonVMRegisterTaskWithConfig(gameType, cfg, m, serverExecutor, rootProvider, superNodeProvider, 
syncValidator, cfg.CannonKona, cfg.CannonKonaAbsolutePreStateBaseURL, cfg.CannonKonaAbsolutePreState) } func newSuperCannonVMRegisterTaskWithConfig( @@ -65,8 +65,9 @@ func newSuperCannonVMRegisterTaskWithConfig( cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, - rootProvider super.RootProvider, - syncValidator generic.SyncValidator, + rootProvider *sources.SupervisorClient, + superNodeProvider *sources.SuperNodeClient, + syncValidator gameTypes.SyncValidator, vmCfg vm.Config, preStateBaseURL *url.URL, preState string, @@ -101,16 +102,16 @@ func newSuperCannonVMRegisterTaskWithConfig( poststateBlock uint64) (*trace.Accessor, error) { provider := vmPrestateProvider.(*vm.PrestateProvider) preimagePrestateProvider := prestateProvider.(super.PreimagePrestateProvider) - return super.NewSuperCannonTraceAccessor(logger, m, vmCfg, serverExecutor, preimagePrestateProvider, rootProvider, provider.PrestatePath(), dir, l1Head, splitDepth, prestateBlock, poststateBlock) + return super.NewSuperCannonTraceAccessor(logger, m, vmCfg, serverExecutor, preimagePrestateProvider, rootProvider, superNodeProvider, provider.PrestatePath(), dir, l1Head, splitDepth, prestateBlock, poststateBlock) }, } } -func NewCannonRegisterTask(gameType gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, syncValidator generic.SyncValidator) *RegisterTask { +func NewCannonRegisterTask(gameType gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, syncValidator gameTypes.SyncValidator) *RegisterTask { return newCannonVMRegisterTaskWithConfig(gameType, cfg, m, serverExecutor, l2Client, rollupClient, syncValidator, cfg.Cannon, cfg.CannonAbsolutePreStateBaseURL, cfg.CannonAbsolutePreState) } -func NewCannonKonaRegisterTask(gameType 
gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, syncValidator generic.SyncValidator) *RegisterTask { +func NewCannonKonaRegisterTask(gameType gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, syncValidator gameTypes.SyncValidator) *RegisterTask { return newCannonVMRegisterTaskWithConfig(gameType, cfg, m, serverExecutor, l2Client, rollupClient, syncValidator, cfg.CannonKona, cfg.CannonKonaAbsolutePreStateBaseURL, cfg.CannonKonaAbsolutePreState) } @@ -121,7 +122,7 @@ func newCannonVMRegisterTaskWithConfig( serverExecutor vm.OracleServerExecutor, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, - syncValidator generic.SyncValidator, + syncValidator gameTypes.SyncValidator, vmCfg vm.Config, preStateBaseURL *url.URL, preState string, @@ -163,111 +164,7 @@ func newCannonVMRegisterTaskWithConfig( } } -func NewAsteriscRegisterTask(gameType gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, syncValidator generic.SyncValidator) *RegisterTask { - stateConverter := asterisc.NewStateConverter(cfg.Asterisc) - return &RegisterTask{ - gameType: gameType, - syncValidator: syncValidator, - getTopPrestateProvider: func(ctx context.Context, prestateBlock uint64) (faultTypes.PrestateProvider, error) { - return outputs.NewPrestateProvider(rollupClient, prestateBlock), nil - }, - getBottomPrestateProvider: cachePrestates( - gameType, - stateConverter, - m, - cfg.AsteriscAbsolutePreStateBaseURL, - cfg.AsteriscAbsolutePreState, - filepath.Join(cfg.Datadir, "asterisc-prestates"), - func(ctx context.Context, path string) faultTypes.PrestateProvider { - return vm.NewPrestateProvider(path, stateConverter) - }), - 
newTraceAccessor: func( - logger log.Logger, - m metrics.Metricer, - prestateProvider faultTypes.PrestateProvider, - vmPrestateProvider faultTypes.PrestateProvider, - dir string, - l1Head eth.BlockID, - splitDepth faultTypes.Depth, - prestateBlock uint64, - poststateBlock uint64) (*trace.Accessor, error) { - provider := vmPrestateProvider.(*vm.PrestateProvider) - return outputs.NewOutputAsteriscTraceAccessor(logger, m, cfg.Asterisc, serverExecutor, l2Client, prestateProvider, provider.PrestatePath(), rollupClient, dir, l1Head, splitDepth, prestateBlock, poststateBlock) - }, - } -} - -func NewAsteriscKonaRegisterTask(gameType gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, syncValidator generic.SyncValidator) *RegisterTask { - stateConverter := asterisc.NewStateConverter(cfg.Asterisc) - return &RegisterTask{ - gameType: gameType, - syncValidator: syncValidator, - getTopPrestateProvider: func(ctx context.Context, prestateBlock uint64) (faultTypes.PrestateProvider, error) { - return outputs.NewPrestateProvider(rollupClient, prestateBlock), nil - }, - getBottomPrestateProvider: cachePrestates( - gameType, - stateConverter, - m, - cfg.AsteriscKonaAbsolutePreStateBaseURL, - cfg.AsteriscKonaAbsolutePreState, - filepath.Join(cfg.Datadir, "asterisc-kona-prestates"), - func(ctx context.Context, path string) faultTypes.PrestateProvider { - return vm.NewPrestateProvider(path, stateConverter) - }), - newTraceAccessor: func( - logger log.Logger, - m metrics.Metricer, - prestateProvider faultTypes.PrestateProvider, - vmPrestateProvider faultTypes.PrestateProvider, - dir string, - l1Head eth.BlockID, - splitDepth faultTypes.Depth, - prestateBlock uint64, - poststateBlock uint64) (*trace.Accessor, error) { - provider := vmPrestateProvider.(*vm.PrestateProvider) - return outputs.NewOutputAsteriscTraceAccessor(logger, m, cfg.AsteriscKona, serverExecutor, 
l2Client, prestateProvider, provider.PrestatePath(), rollupClient, dir, l1Head, splitDepth, prestateBlock, poststateBlock) - }, - } -} - -func NewSuperAsteriscKonaRegisterTask(gameType gameTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, rootProvider super.RootProvider, syncValidator generic.SyncValidator) *RegisterTask { - stateConverter := asterisc.NewStateConverter(cfg.AsteriscKona) - return &RegisterTask{ - gameType: gameType, - syncValidator: syncValidator, - skipPrestateValidation: gameType == gameTypes.SuperPermissionedGameType, - getTopPrestateProvider: func(ctx context.Context, prestateTimestamp uint64) (faultTypes.PrestateProvider, error) { - return super.NewSuperRootPrestateProvider(rootProvider, prestateTimestamp), nil - }, - getBottomPrestateProvider: cachePrestates( - gameType, - stateConverter, - m, - cfg.AsteriscKonaAbsolutePreStateBaseURL, - cfg.AsteriscKonaAbsolutePreState, - filepath.Join(cfg.Datadir, "super-asterisc-kona-prestates"), - func(ctx context.Context, path string) faultTypes.PrestateProvider { - return vm.NewPrestateProvider(path, stateConverter) - }), - newTraceAccessor: func( - logger log.Logger, - m metrics.Metricer, - prestateProvider faultTypes.PrestateProvider, - vmPrestateProvider faultTypes.PrestateProvider, - dir string, - l1Head eth.BlockID, - splitDepth faultTypes.Depth, - prestateBlock uint64, - poststateBlock uint64) (*trace.Accessor, error) { - provider := vmPrestateProvider.(*vm.PrestateProvider) - preimagePrestateProvider := prestateProvider.(super.PreimagePrestateProvider) - return super.NewSuperAsteriscKonaTraceAccessor(logger, m, cfg.AsteriscKona, serverExecutor, preimagePrestateProvider, rootProvider, provider.PrestatePath(), dir, l1Head, splitDepth, prestateBlock, poststateBlock) - }, - } -} - -func NewAlphabetRegisterTask(gameType gameTypes.GameType, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, syncValidator generic.SyncValidator) 
*RegisterTask { +func NewAlphabetRegisterTask(gameType gameTypes.GameType, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, syncValidator gameTypes.SyncValidator) *RegisterTask { return &RegisterTask{ gameType: gameType, syncValidator: syncValidator, diff --git a/op-challenger/game/fault/register_task_test.go b/op-challenger/game/fault/register_task_test.go index 5eaeb15f458b3..65068bc5669ee 100644 --- a/op-challenger/game/fault/register_task_test.go +++ b/op-challenger/game/fault/register_task_test.go @@ -76,7 +76,7 @@ func TestRegisterOracle_AddsOracle(t *testing.T) { } for _, testCase := range tests { t.Run(testCase.name, func(t *testing.T) { - for _, gameType := range []gameTypes.GameType{gameTypes.CannonGameType, gameTypes.SuperCannonGameType, gameTypes.SuperAsteriscKonaGameType} { + for _, gameType := range []gameTypes.GameType{gameTypes.CannonGameType, gameTypes.SuperCannonGameType, gameTypes.SuperCannonKonaGameType} { t.Run(fmt.Sprintf("%v", gameType), func(t *testing.T) { gameFactoryAddr := common.Address{0xaa} gameImplAddr := common.Address{0xbb} @@ -86,7 +86,7 @@ func TestRegisterOracle_AddsOracle(t *testing.T) { rpc.SetResponse(gameFactoryAddr, "version", rpcblock.Latest, nil, []interface{}{testCase.version}) if gameType == gameTypes.CannonGameType { rpc.AddContract(gameImplAddr, snapshots.LoadFaultDisputeGameABI()) - } else if gameType == gameTypes.SuperCannonGameType || gameType == gameTypes.SuperAsteriscKonaGameType { + } else if gameType == gameTypes.SuperCannonGameType || gameType == gameTypes.SuperCannonKonaGameType { rpc.AddContract(gameImplAddr, snapshots.LoadSuperFaultDisputeGameABI()) } else { t.Fatalf("game type %v not supported", gameType) diff --git a/op-challenger/game/fault/trace/asterisc/provider.go b/op-challenger/game/fault/trace/asterisc/provider.go deleted file mode 100644 index c622b6ed9dd1a..0000000000000 --- a/op-challenger/game/fault/trace/asterisc/provider.go +++ /dev/null @@ -1,198 +0,0 @@ -package 
asterisc - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "os" - "path/filepath" - - "github.com/ethereum-optimism/optimism/op-challenger/config" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" - "github.com/ethereum-optimism/optimism/op-program/host/kvstore" - kvtypes "github.com/ethereum-optimism/optimism/op-program/host/types" - "github.com/ethereum-optimism/optimism/op-service/ioutil" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" -) - -type AsteriscTraceProvider struct { - logger log.Logger - dir string - prestate string - generator utils.ProofGenerator - gameDepth types.Depth - preimageLoader *utils.PreimageLoader - stateConverter vm.StateConverter - cfg vm.Config - - types.PrestateProvider - - // lastStep stores the last step in the actual trace if known. 0 indicates unknown. - // Cached as an optimisation to avoid repeatedly attempting to execute beyond the end of the trace. 
- lastStep uint64 -} - -func NewTraceProvider(logger log.Logger, m vm.Metricer, cfg vm.Config, vmCfg vm.OracleServerExecutor, prestateProvider types.PrestateProvider, asteriscPrestate string, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *AsteriscTraceProvider { - return &AsteriscTraceProvider{ - logger: logger, - dir: dir, - prestate: asteriscPrestate, - generator: vm.NewExecutor(logger, m, cfg, vmCfg, asteriscPrestate, localInputs), - gameDepth: gameDepth, - preimageLoader: utils.NewPreimageLoader(func() (utils.PreimageSource, error) { - return kvstore.NewDiskKV(logger, vm.PreimageDir(dir), kvtypes.DataFormatFile) - }), - PrestateProvider: prestateProvider, - stateConverter: NewStateConverter(cfg), - cfg: cfg, - } -} - -func (p *AsteriscTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) { - traceIndex := pos.TraceIndex(p.gameDepth) - if !traceIndex.IsUint64() { - return common.Hash{}, errors.New("trace index out of bounds") - } - proof, err := p.loadProof(ctx, traceIndex.Uint64()) - if err != nil { - return common.Hash{}, err - } - value := proof.ClaimValue - - if value == (common.Hash{}) { - return common.Hash{}, errors.New("proof missing post hash") - } - return value, nil -} - -func (p *AsteriscTraceProvider) GetStepData(ctx context.Context, pos types.Position) ([]byte, []byte, *types.PreimageOracleData, error) { - traceIndex := pos.TraceIndex(p.gameDepth) - if !traceIndex.IsUint64() { - return nil, nil, nil, errors.New("trace index out of bounds") - } - proof, err := p.loadProof(ctx, traceIndex.Uint64()) - if err != nil { - return nil, nil, nil, err - } - value := ([]byte)(proof.StateData) - if len(value) == 0 { - return nil, nil, nil, errors.New("proof missing state data") - } - data := ([]byte)(proof.ProofData) - if data == nil { - return nil, nil, nil, errors.New("proof missing proof data") - } - oracleData, err := p.preimageLoader.LoadPreimage(proof) - if err != nil { - return nil, nil, nil, 
fmt.Errorf("failed to load preimage: %w", err) - } - return value, data, oracleData, nil -} - -func (p *AsteriscTraceProvider) GetL2BlockNumberChallenge(_ context.Context) (*types.InvalidL2BlockNumberChallenge, error) { - return nil, types.ErrL2BlockNumberValid -} - -// loadProof will attempt to load or generate the proof data at the specified index -// If the requested index is beyond the end of the actual trace it is extended with no-op instructions. -func (p *AsteriscTraceProvider) loadProof(ctx context.Context, i uint64) (*utils.ProofData, error) { - // Attempt to read the last step from disk cache - if p.lastStep == 0 { - step, err := utils.ReadLastStep(p.dir) - if err != nil { - p.logger.Warn("Failed to read last step from disk cache", "err", err) - } else { - p.lastStep = step - } - } - // If the last step is tracked, set i to the last step to generate or load the final proof - if p.lastStep != 0 && i > p.lastStep { - i = p.lastStep - } - path := filepath.Join(p.dir, utils.ProofsDir, fmt.Sprintf("%d.json.gz", i)) - file, err := ioutil.OpenDecompressed(path) - if errors.Is(err, os.ErrNotExist) { - if err := p.generator.GenerateProof(ctx, p.dir, i); err != nil { - return nil, fmt.Errorf("generate asterisc trace with proof at %v: %w", i, err) - } - // Try opening the file again now and it should exist. - file, err = ioutil.OpenDecompressed(path) - if errors.Is(err, os.ErrNotExist) { - // Expected proof wasn't generated, check if we reached the end of execution - proof, step, exited, err := p.stateConverter.ConvertStateToProof(ctx, vm.FinalStatePath(p.dir, p.cfg.BinarySnapshots)) - if err != nil { - return nil, err - } - if exited && step <= i { - p.logger.Warn("Requested proof was after the program exited", "proof", i, "last", step) - // The final instruction has already been applied to this state, so the last step we can execute - // is one before its Step value. 
- p.lastStep = step - 1 - // Extend the trace out to the full length using a no-op instruction that doesn't change any state - // No execution is done, so no proof-data or oracle values are required. - if err := utils.WriteLastStep(p.dir, proof, p.lastStep); err != nil { - p.logger.Warn("Failed to write last step to disk cache", "step", p.lastStep) - } - return proof, nil - } else { - return nil, fmt.Errorf("expected proof not generated but final state was not exited, requested step %v, final state at step %v", i, step) - } - } - } - if err != nil { - return nil, fmt.Errorf("cannot open proof file (%v): %w", path, err) - } - defer file.Close() - var proof utils.ProofData - err = json.NewDecoder(file).Decode(&proof) - if err != nil { - return nil, fmt.Errorf("failed to read proof (%v): %w", path, err) - } - return &proof, nil -} - -// AsteriscTraceProviderForTest is a AsteriscTraceProvider that can find the step referencing the preimage read -// Only to be used for testing -type AsteriscTraceProviderForTest struct { - *AsteriscTraceProvider -} - -func NewTraceProviderForTest(logger log.Logger, m vm.Metricer, cfg *config.Config, localInputs utils.LocalGameInputs, dir string, gameDepth types.Depth) *AsteriscTraceProviderForTest { - p := &AsteriscTraceProvider{ - logger: logger, - dir: dir, - prestate: cfg.AsteriscAbsolutePreState, - generator: vm.NewExecutor(logger, m, cfg.Asterisc, vm.NewOpProgramServerExecutor(logger), cfg.AsteriscAbsolutePreState, localInputs), - gameDepth: gameDepth, - preimageLoader: utils.NewPreimageLoader(func() (utils.PreimageSource, error) { - return kvstore.NewDiskKV(logger, vm.PreimageDir(dir), kvtypes.DataFormatFile) - }), - stateConverter: NewStateConverter(cfg.Asterisc), - cfg: cfg.Asterisc, - } - return &AsteriscTraceProviderForTest{p} -} - -func (p *AsteriscTraceProviderForTest) FindStep(ctx context.Context, start uint64, preimage utils.PreimageOpt) (uint64, error) { - // Run asterisc to find the step that meets the preimage conditions 
- if err := p.generator.(*vm.Executor).DoGenerateProof(ctx, p.dir, start, math.MaxUint64, preimage()...); err != nil { - return 0, fmt.Errorf("generate asterisc trace (until preimage read): %w", err) - } - // Load the step from the state asterisc finished with - _, step, exited, err := p.stateConverter.ConvertStateToProof(ctx, vm.FinalStatePath(p.dir, p.cfg.BinarySnapshots)) - if err != nil { - return 0, fmt.Errorf("failed to load final state: %w", err) - } - // Check we didn't get to the end of the trace without finding the preimage read we were looking for - if exited { - return 0, fmt.Errorf("preimage read not found: %w", io.EOF) - } - // The state is the post-state so the step we want to execute to read the preimage is step - 1. - return step - 1, nil -} diff --git a/op-challenger/game/fault/trace/asterisc/provider_test.go b/op-challenger/game/fault/trace/asterisc/provider_test.go deleted file mode 100644 index 609fa143e86eb..0000000000000 --- a/op-challenger/game/fault/trace/asterisc/provider_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package asterisc - -import ( - "context" - "embed" - "encoding/json" - "fmt" - "math" - "math/big" - "os" - "path/filepath" - "testing" - - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" - "github.com/ethereum-optimism/optimism/op-service/ioutil" - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" -) - -//go:embed test_data -var testData embed.FS -var asteriscWitnessLen = 362 - -func PositionFromTraceIndex(provider *AsteriscTraceProvider, idx *big.Int) types.Position { - return types.NewPosition(provider.gameDepth, idx) -} - -func TestGet(t *testing.T) { - dataDir, prestate := setupTestData(t) - t.Run("ExistingProof", func(t 
*testing.T) { - provider, generator := setupWithTestData(t, dataDir, prestate) - value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, common.Big0)) - require.NoError(t, err) - require.Equal(t, common.HexToHash("0x034689707b571db46b32c9e433def18e648f4e1fa9e5abd4012e7913031bfc10"), value) - require.Empty(t, generator.generated) - }) - - t.Run("ErrorsTraceIndexOutOfBounds", func(t *testing.T) { - provider, generator := setupWithTestData(t, dataDir, prestate) - largePosition := PositionFromTraceIndex(provider, new(big.Int).Mul(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(2))) - _, err := provider.Get(context.Background(), largePosition) - require.ErrorContains(t, err, "trace index out of bounds") - require.Empty(t, generator.generated) - }) - - t.Run("MissingPostHash", func(t *testing.T) { - provider, generator := setupWithTestData(t, dataDir, prestate) - _, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, big.NewInt(1))) - require.ErrorContains(t, err, "missing post hash") - require.Empty(t, generator.generated) - }) - - t.Run("IgnoreUnknownFields", func(t *testing.T) { - provider, generator := setupWithTestData(t, dataDir, prestate) - value, err := provider.Get(context.Background(), PositionFromTraceIndex(provider, big.NewInt(2))) - require.NoError(t, err) - expected := common.HexToHash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - require.Equal(t, expected, value) - require.Empty(t, generator.generated) - }) -} - -func TestGetStepData(t *testing.T) { - t.Run("ExistingProof", func(t *testing.T) { - dataDir, prestate := setupTestData(t) - provider, generator := setupWithTestData(t, dataDir, prestate) - value, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, common.Big0)) - require.NoError(t, err) - expected := 
common.FromHex("0x354cfaf28a5b60c3f64f22f9f171b64aa067f90c6de6c96f725f44c5cf9f8ac1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080e080000000000000000000000007f0000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") - require.Equal(t, expected, value) - expectedProof := common.FromHex("0x000000000000000003350100930581006f00800100000000970f000067800f01000000000000000097c2ffff938282676780020000000000032581009308e0050e1893682c323d6695396f1122b3cb562af8c65cab19978c9246434fda0536c90ca1cfabf684ebce3ad9fbd54000a2b258f8d0e447c1bb6f7e97de47aadfc12cd7b6f466bfd024daa905886c5f638f4692d843709e6c1c0d9eb2e251c626d53d15e04b59735fe0781bc4357a4243fbc28e6981902a8c2669a2d6456f7a964423db5d1585da978861f8b84067654b29490275c82b54083ee09c82eb7aa9ae693911226bb8297ad82c0963ae943f22d0c6086f4f14437e4d1c87ceb17e68caf5eaec77f14b46225b417d2191ca7b49564c896836a95ad4e9c383bd1c8ff9d8e888c64fb3836daa9535e58372e9646b7b144219980a4389aca5da241c3ec11fbc9297bd7a94ac671ccec288604c23a0072b0c1ed069198959cacdc2574aff65b7eceffc391e21778a1775deceb3ec0990836df98d98a4f3f0dc854587230fbf59e4daa60e8240d74caf90f7e2cd014c1d5d707b2e44269d9a9caf133882fe1ebb2f4237f6282abe89639b357e9231418d0c41373229ae9edfa6815bec484cb79772c9e2a7d80912123558f79b539bb45d435f2a4446970f1e2123494740285cec3491b0a41a9fd7403bdc8cd239a87508039a77b48ee39a951a8bd196b583de2b93444aafd456d0cd92050fa6a816d5183c1d75e96df540c8ac3bb8638b971f0cf3fb5b4a321487a1c8992b921de110f3d5bbb87369b25fe743ad7e789ca52d9f9fe62ccb103b78fe65eaa2cd47895022
c590639c8f0c6a3999d8a5c71ed94d355815851b479f8d93eae90822294c96b39724b33491f8497b0bf7e1b995b37e4d759ff8a7958d194da6e00c475a6ddcf6efcb5fb4bb383c9b273da18d01e000dbe9c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618db8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea32293237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d7358448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a927ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757bf558bebd2ceec7f3c5dce04a4782f88c2c6036ae78ee206d0bc5289d20461a2e21908c2968c0699040a6fd866a577a99a9d2ec88745c815fd4a472c789244daae824d72ddc272aab68a8c3022e36f10454437c1886f3ff9927b64f232df414f27e429a4bef3083bc31a671d046ea5c1f5b8c3094d72868d9dfdc12c7334ac5f743cc5c365a9a6a15c1f240ac25880c7a9d1de290696cb766074a1d83d9278164adcf616c3bfabf63999a01966c998b7bb572774035a63ead49da73b5987f34775786645d0c5dd7c04a2f8a75dcae085213652f5bce3ea8b9b9bedd1cab3c5e9b88b152c9b8a7b79637d35911848b0c41e7cc7cca2ab4fe9a15f9c38bb4bb9390c4e2d8ce834ffd7a6cd85d7113d4521abb857774845c4291e6f6d010d97e3185bc799d83e3bb31501b3da786680df30fbc18eb41cbce611e8c0e9c72f69571ca10d3ef857d04d9c03ead7c6317d797a090fa1271ad9c7addfbcb412e9643d4fb33b1809c42623f474055fa9400a2027a7a885c8dfa4efe20666b4ee27d7529c134d7f28d53f175f6bf4b62faa2110d5b76f0f770c15e628181c1fcc18f970a9c34d24b2fc8c50ca9c07a7156ef4e5ff4bdf002eda0b11c1d359d0b59a54680704dbb9db631457879b27e0dfdbe50158fd9cf9b4cf77605c4ac4c95bd65fc9f6f9295a686647cb999090819cda700820c282c613cedcd2
18540bbc6f37b01c6567c4a1ea624f092a3a5cca2d6f0f0db231972fce627f0ecca0dee60f17551c5f8fdaeb5ab560b2ceb781cdb339361a0fbee1b9dffad59115138c8d6a70dda9ccc1bf0bbdd7fee15764845db875f6432559ff8dbc9055324431bc34e5b93d15da307317849eccd90c0c7b98870b9317c15a5959dcfb84c76dcc908c4fe6ba92126339bf06e458f6646df5e83ba7c3d35bc263b3222c8e9040068847749ca8e8f95045e4342aeb521eb3a5587ec268ed3aa6faf32b62b0bc41a9d549521f406fc3ec7d4dabb75e0d3e144d7cc882372d13746b6dcd481b1b229bcaec9f7422cdfb84e35c5d92171376cae5c86300822d729cd3a8479583bef09527027dba5f11263c5cbbeb3834b7a5c1cba9aa5fee0c95ec3f17a33ec3d8047fff799187f5ae2040bbe913c226c34c9fbe4389dd728984257a816892b3cae3e43191dd291f0eb50000000000000000420000000000000035000000000000000000000000000000060000000000000000100000000000001900000000000000480000000000001050edbc06b4bfc3ee108b66f7a8f772ca4d90e1a085f4a8398505920f7465bb44b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d3021ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85e58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a193440eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968ffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f839867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756afcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0f9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5f8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf8923490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99cc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8beccda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d22733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981fe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0b46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36a
a0c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618db8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea32293237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d7358448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a927ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757bf558bebd2ceec7f3c5dce04a4782f88c2c6036ae78ee206d0bc5289d20461a2e21908c2968c0699040a6fd866a577a99a9d2ec88745c815fd4a472c789244daae824d72ddc272aab68a8c3022e36f10454437c1886f3ff9927b64f232df414f27e429a4bef3083bc31a671d046ea5c1f5b8c3094d72868d9dfdc12c7334ac5f743cc5c365a9a6a15c1f240ac25880c7a9d1de290696cb766074a1d83d9278164adcf616c3bfabf63999a01966c998b7bb572774035a63ead49da73b5987f34775786645d0c5dd7c04a2f8a75dcae085213652f5bce3ea8b9b9bedd1cab3c5e9b88b152c9b8a7b79637d35911848b0c41e7cc7cca2ab4fe9a15f9c38bb4bb9390c4e2d8ce834ffd7a6cd85d7113d4521abb857774845c4291e6f6d010d97e3185bc799d83e3bb31501b3da786680df30fbc18eb41cbce611e8c0e9c72f69571ca10d3ef857d04d9c03ead7c6317d797a090fa1271ad9c7addfbcb412e9643d4fb33b1809c42623f474055fa9400a2027a7a885c8dfa4efe20666b4ee27d7529c134d7f28d53f175f6bf4b62faa2110d5b76f0f770c15e628181c1fcc18f970a9c34d24b2fc8c50ca9c07a7156ef4e5ff4bdf002eda0b11c1d359d0b59a54680704dbb9db631457879b27e0dfdbe50158fd9cf9b4cf77605c4ac4c95bd65fc9f6f9295a686647cb999090819cda700820c282c613cedcd218540bbc6f37b01c6567c4a1ea624f092a3a5cca2d6f0f0db231972fce627f0ecca0dee60f17551c5f8fdaeb5ab560b2ceb781cdb339361a0fbee1b9dffad59115138c8d6a70dda9ccc1bf0bbdd7fee1
5764845db875f6432559ff8dbc9055324431bc34e5b93d15da307317849eccd90c0c7b98870b9317c15a5959dcfb84c76dcc908c4fe6ba92126339bf06e458f6646df5e83ba7c3d35bc263b3222c8e9040068847749ca8e8f95045e4342aeb521eb3a5587ec268ed3aa6faf32b62b0bc41a9d549521f406fc30f3e39c5412c30550d1d07fb07ff0e546fbeea1988f6658f04a9b19693e5b99d84e35c5d92171376cae5c86300822d729cd3a8479583bef09527027dba5f11263c5cbbeb3834b7a5c1cba9aa5fee0c95ec3f17a33ec3d8047fff799187f5ae2040bbe913c226c34c9fbe4389dd728984257a816892b3cae3e43191dd291f0eb5") - require.Equal(t, expectedProof, proof) - // TODO: Need to add some oracle data - require.Nil(t, data) - require.Empty(t, generator.generated) - }) - - t.Run("ErrorsTraceIndexOutOfBounds", func(t *testing.T) { - dataDir, prestate := setupTestData(t) - provider, generator := setupWithTestData(t, dataDir, prestate) - largePosition := PositionFromTraceIndex(provider, new(big.Int).Mul(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(2))) - _, _, _, err := provider.GetStepData(context.Background(), largePosition) - require.ErrorContains(t, err, "trace index out of bounds") - require.Empty(t, generator.generated) - }) - - t.Run("GenerateProof", func(t *testing.T) { - dataDir, prestate := setupTestData(t) - provider, generator := setupWithTestData(t, dataDir, prestate) - generator.finalState = &VMState{ - Step: 10, - Exited: true, - Witness: make([]byte, asteriscWitnessLen), - } - generator.proof = &utils.ProofData{ - ClaimValue: common.Hash{0xaa}, - StateData: []byte{0xbb}, - ProofData: []byte{0xcc}, - OracleKey: common.Hash{0xdd}.Bytes(), - OracleValue: []byte{0xdd}, - OracleOffset: 10, - } - preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(4))) - require.NoError(t, err) - require.Contains(t, generator.generated, 4, "should have tried to generate the proof") - - require.EqualValues(t, generator.proof.StateData, preimage) - require.EqualValues(t, generator.proof.ProofData, proof) - expectedData := 
types.NewPreimageOracleData(generator.proof.OracleKey, generator.proof.OracleValue, generator.proof.OracleOffset) - require.EqualValues(t, expectedData, data) - }) - - t.Run("ProofAfterEndOfTrace", func(t *testing.T) { - dataDir, prestate := setupTestData(t) - provider, generator := setupWithTestData(t, dataDir, prestate) - generator.finalState = &VMState{ - Step: 10, - Exited: true, - Witness: make([]byte, asteriscWitnessLen), - } - generator.proof = &utils.ProofData{ - ClaimValue: common.Hash{0xaa}, - StateData: []byte{0xbb}, - ProofData: []byte{0xcc}, - OracleKey: common.Hash{0xdd}.Bytes(), - OracleValue: []byte{0xdd}, - OracleOffset: 10, - } - preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000))) - require.NoError(t, err) - require.Contains(t, generator.generated, 7000, "should have tried to generate the proof") - - witness := generator.finalState.Witness - require.EqualValues(t, witness, preimage) - require.Equal(t, []byte{}, proof) - require.Nil(t, data) - }) - - t.Run("ReadLastStepFromDisk", func(t *testing.T) { - dataDir, prestate := setupTestData(t) - provider, initGenerator := setupWithTestData(t, dataDir, prestate) - initGenerator.finalState = &VMState{ - Step: 10, - Exited: true, - Witness: make([]byte, asteriscWitnessLen), - } - initGenerator.proof = &utils.ProofData{ - ClaimValue: common.Hash{0xaa}, - StateData: []byte{0xbb}, - ProofData: []byte{0xcc}, - OracleKey: common.Hash{0xdd}.Bytes(), - OracleValue: []byte{0xdd}, - OracleOffset: 10, - } - _, _, _, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000))) - require.NoError(t, err) - require.Contains(t, initGenerator.generated, 7000, "should have tried to generate the proof") - - provider, generator := setupWithTestData(t, dataDir, prestate) - generator.finalState = &VMState{ - Step: 10, - Exited: true, - Witness: make([]byte, asteriscWitnessLen), - } - generator.proof = 
&utils.ProofData{ - ClaimValue: common.Hash{0xaa}, - StateData: []byte{0xbb}, - ProofData: []byte{0xcc}, - } - preimage, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(7000))) - require.NoError(t, err) - require.Empty(t, generator.generated, "should not have to generate the proof again") - - require.EqualValues(t, initGenerator.finalState.Witness, preimage) - require.Empty(t, proof) - require.Nil(t, data) - }) - - t.Run("MissingStateData", func(t *testing.T) { - dataDir, prestate := setupTestData(t) - provider, generator := setupWithTestData(t, dataDir, prestate) - _, _, _, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(1))) - require.ErrorContains(t, err, "missing state data") - require.Empty(t, generator.generated) - }) - - t.Run("IgnoreUnknownFields", func(t *testing.T) { - dataDir, prestate := setupTestData(t) - provider, generator := setupWithTestData(t, dataDir, prestate) - value, proof, data, err := provider.GetStepData(context.Background(), PositionFromTraceIndex(provider, big.NewInt(2))) - require.NoError(t, err) - expected := common.FromHex("cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") - require.Equal(t, expected, value) - expectedProof := common.FromHex("dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd") - require.Equal(t, expectedProof, proof) - require.Empty(t, generator.generated) - require.Nil(t, data) - }) -} - -func setupTestData(t *testing.T) (string, string) { - srcDir := filepath.Join("test_data", "proofs") - entries, err := testData.ReadDir(srcDir) - require.NoError(t, err) - dataDir := t.TempDir() - require.NoError(t, os.Mkdir(filepath.Join(dataDir, utils.ProofsDir), 0o777)) - for _, entry := range entries { - path := filepath.Join(srcDir, entry.Name()) - file, err := testData.ReadFile(path) - require.NoErrorf(t, err, "reading %v", path) - proofFile := filepath.Join(dataDir, utils.ProofsDir, 
entry.Name()+".gz") - err = ioutil.WriteCompressedBytes(proofFile, file, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) - require.NoErrorf(t, err, "writing %v", path) - } - return dataDir, "state.json" -} - -func setupWithTestData(t *testing.T, dataDir string, prestate string) (*AsteriscTraceProvider, *stubGenerator) { - generator := &stubGenerator{} - return &AsteriscTraceProvider{ - logger: testlog.Logger(t, log.LevelInfo), - dir: dataDir, - generator: generator, - prestate: filepath.Join(dataDir, prestate), - gameDepth: 63, - stateConverter: generator, - }, generator -} - -type stubGenerator struct { - generated []int // Using int makes assertions easier - finalState *VMState - proof *utils.ProofData - - finalStatePath string -} - -func (e *stubGenerator) ConvertStateToProof(ctx context.Context, statePath string) (*utils.ProofData, uint64, bool, error) { - if statePath == e.finalStatePath { - return &utils.ProofData{ - ClaimValue: e.finalState.StateHash, - StateData: e.finalState.Witness, - ProofData: []byte{}, - }, e.finalState.Step, e.finalState.Exited, nil - } else { - return nil, 0, false, fmt.Errorf("loading unexpected state: %s, only support: %s", statePath, e.finalStatePath) - } -} - -func (e *stubGenerator) GenerateProof(ctx context.Context, dir string, i uint64) error { - e.generated = append(e.generated, int(i)) - var proofFile string - var data []byte - var err error - if e.finalState != nil && e.finalState.Step <= i { - // Requesting a trace index past the end of the trace - proofFile = vm.FinalStatePath(dir, false) - e.finalStatePath = proofFile - data, err = json.Marshal(e.finalState) - if err != nil { - return err - } - return ioutil.WriteCompressedBytes(proofFile, data, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) - } - if e.proof != nil { - proofFile = filepath.Join(dir, utils.ProofsDir, fmt.Sprintf("%d.json.gz", i)) - data, err = json.Marshal(e.proof) - if err != nil { - return err - } - return ioutil.WriteCompressedBytes(proofFile, data, 
os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0o644) - } - return nil -} diff --git a/op-challenger/game/fault/trace/asterisc/state_converter.go b/op-challenger/game/fault/trace/asterisc/state_converter.go deleted file mode 100644 index 67b99f07db06e..0000000000000 --- a/op-challenger/game/fault/trace/asterisc/state_converter.go +++ /dev/null @@ -1,70 +0,0 @@ -package asterisc - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "os/exec" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" -) - -// The state struct will be read from json. -// other fields included in json are specific to FPVM implementation, and not required for trace provider. -type VMState struct { - PC uint64 `json:"pc"` - Exited bool `json:"exited"` - Step uint64 `json:"step"` - Witness hexutil.Bytes `json:"witness"` - StateHash common.Hash `json:"stateHash"` -} - -type StateConverter struct { - vmConfig vm.Config - cmdExecutor func(ctx context.Context, binary string, args ...string) (stdOut string, stdErr string, err error) -} - -func NewStateConverter(vmConfig vm.Config) *StateConverter { - return &StateConverter{ - vmConfig: vmConfig, - cmdExecutor: runCmd, - } -} - -func (c *StateConverter) ConvertStateToProof(ctx context.Context, statePath string) (*utils.ProofData, uint64, bool, error) { - stdOut, stdErr, err := c.cmdExecutor(ctx, c.vmConfig.VmBin, "witness", "--input", statePath) - if err != nil { - return nil, 0, false, fmt.Errorf("state conversion failed: %w (%s)", err, stdErr) - } - var data VMState - if err := json.Unmarshal([]byte(stdOut), &data); err != nil { - return nil, 0, false, fmt.Errorf("failed to parse state data: %w", err) - } - // Extend the trace out to the full length using a no-op instruction that doesn't change any state - // No execution is done, so no proof-data or 
oracle values are required. - return &utils.ProofData{ - ClaimValue: data.StateHash, - StateData: data.Witness, - ProofData: []byte{}, - OracleKey: nil, - OracleValue: nil, - OracleOffset: 0, - }, data.Step, data.Exited, nil -} - -func runCmd(ctx context.Context, binary string, args ...string) (stdOut string, stdErr string, err error) { - var outBuf bytes.Buffer - var errBuf bytes.Buffer - cmd := exec.CommandContext(ctx, binary, args...) - cmd.Stdout = &outBuf - cmd.Stderr = &errBuf - err = cmd.Run() - stdOut = outBuf.String() - stdErr = errBuf.String() - return -} diff --git a/op-challenger/game/fault/trace/asterisc/state_converter_test.go b/op-challenger/game/fault/trace/asterisc/state_converter_test.go deleted file mode 100644 index 8a3d9c4b26916..0000000000000 --- a/op-challenger/game/fault/trace/asterisc/state_converter_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package asterisc - -import ( - "context" - "encoding/json" - "errors" - "testing" - - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -const testBinary = "./somewhere/asterisc" - -func TestStateConverter(t *testing.T) { - setup := func(t *testing.T) (*StateConverter, *capturingExecutor) { - vmCfg := vm.Config{ - VmBin: testBinary, - } - executor := &capturingExecutor{} - converter := NewStateConverter(vmCfg) - converter.cmdExecutor = executor.exec - return converter, executor - } - - t.Run("Valid", func(t *testing.T) { - converter, executor := setup(t) - data := VMState{ - Witness: []byte{1, 2, 3, 4}, - StateHash: common.Hash{0xab}, - Step: 42, - Exited: true, - PC: 11, - } - ser, err := json.Marshal(data) - require.NoError(t, err) - executor.stdOut = string(ser) - proof, step, exited, err := converter.ConvertStateToProof(context.Background(), "foo.json") - require.NoError(t, err) - require.Equal(t, data.Exited, exited) - require.Equal(t, data.Step, step) - require.Equal(t, data.StateHash, 
proof.ClaimValue) - require.Equal(t, data.Witness, proof.StateData) - require.NotNil(t, proof.ProofData, "later validations require this to be non-nil") - - require.Equal(t, testBinary, executor.binary) - require.Equal(t, []string{"witness", "--input", "foo.json"}, executor.args) - }) - - t.Run("CommandError", func(t *testing.T) { - converter, executor := setup(t) - executor.err = errors.New("boom") - _, _, _, err := converter.ConvertStateToProof(context.Background(), "foo.json") - require.ErrorIs(t, err, executor.err) - }) - - t.Run("InvalidOutput", func(t *testing.T) { - converter, executor := setup(t) - executor.stdOut = "blah blah" - _, _, _, err := converter.ConvertStateToProof(context.Background(), "foo.json") - require.ErrorContains(t, err, "failed to parse state data") - }) -} - -type capturingExecutor struct { - binary string - args []string - - stdOut string - stdErr string - err error -} - -func (c *capturingExecutor) exec(_ context.Context, binary string, args ...string) (string, string, error) { - c.binary = binary - c.args = args - return c.stdOut, c.stdErr, c.err -} diff --git a/op-challenger/game/fault/trace/asterisc/test_data/invalid.json b/op-challenger/game/fault/trace/asterisc/test_data/invalid.json deleted file mode 100644 index 06a76bf5b23de..0000000000000 --- a/op-challenger/game/fault/trace/asterisc/test_data/invalid.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "preimageKey": 1 -} diff --git a/op-challenger/game/fault/trace/asterisc/test_data/proofs/0.json b/op-challenger/game/fault/trace/asterisc/test_data/proofs/0.json deleted file mode 100644 index e5838ddfc5abb..0000000000000 --- a/op-challenger/game/fault/trace/asterisc/test_data/proofs/0.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "step": 0, - "pre": "0x03abd5c535c08bae7c4ad48fcae39b65f9c25239f65b4376c58638d262c97381", - "post": "0x034689707b571db46b32c9e433def18e648f4e1fa9e5abd4012e7913031bfc10", - "state-data": 
"0x354cfaf28a5b60c3f64f22f9f171b64aa067f90c6de6c96f725f44c5cf9f8ac1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080e080000000000000000000000007f0000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "proof-data": "0x000000000000000003350100930581006f00800100000000970f000067800f01000000000000000097c2ffff938282676780020000000000032581009308e0050e1893682c323d6695396f1122b3cb562af8c65cab19978c9246434fda0536c90ca1cfabf684ebce3ad9fbd54000a2b258f8d0e447c1bb6f7e97de47aadfc12cd7b6f466bfd024daa905886c5f638f4692d843709e6c1c0d9eb2e251c626d53d15e04b59735fe0781bc4357a4243fbc28e6981902a8c2669a2d6456f7a964423db5d1585da978861f8b84067654b29490275c82b54083ee09c82eb7aa9ae693911226bb8297ad82c0963ae943f22d0c6086f4f14437e4d1c87ceb17e68caf5eaec77f14b46225b417d2191ca7b49564c896836a95ad4e9c383bd1c8ff9d8e888c64fb3836daa9535e58372e9646b7b144219980a4389aca5da241c3ec11fbc9297bd7a94ac671ccec288604c23a0072b0c1ed069198959cacdc2574aff65b7eceffc391e21778a1775deceb3ec0990836df98d98a4f3f0dc854587230fbf59e4daa60e8240d74caf90f7e2cd014c1d5d707b2e44269d9a9caf133882fe1ebb2f4237f6282abe89639b357e9231418d0c41373229ae9edfa6815bec484cb79772c9e2a7d80912123558f79b539bb45d435f2a4446970f1e2123494740285cec3491b0a41a9fd7403bdc8cd239a87508039a77b48ee39a951a8bd196b583de2b93444aafd456d0cd92050fa6a816d5183c1d75e96df540c8ac3bb8638b971f0cf3fb5b4a321487a1c8992b921de110f3d5bbb87369b25fe743ad7e789ca52d9f9fe62ccb103b78fe65eaa2cd47895022c590639c8f0c6a3999d8a5c71ed94d355815851b479f8d93eae90822294c96b39724b
33491f8497b0bf7e1b995b37e4d759ff8a7958d194da6e00c475a6ddcf6efcb5fb4bb383c9b273da18d01e000dbe9c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618db8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea32293237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d7358448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a927ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757bf558bebd2ceec7f3c5dce04a4782f88c2c6036ae78ee206d0bc5289d20461a2e21908c2968c0699040a6fd866a577a99a9d2ec88745c815fd4a472c789244daae824d72ddc272aab68a8c3022e36f10454437c1886f3ff9927b64f232df414f27e429a4bef3083bc31a671d046ea5c1f5b8c3094d72868d9dfdc12c7334ac5f743cc5c365a9a6a15c1f240ac25880c7a9d1de290696cb766074a1d83d9278164adcf616c3bfabf63999a01966c998b7bb572774035a63ead49da73b5987f34775786645d0c5dd7c04a2f8a75dcae085213652f5bce3ea8b9b9bedd1cab3c5e9b88b152c9b8a7b79637d35911848b0c41e7cc7cca2ab4fe9a15f9c38bb4bb9390c4e2d8ce834ffd7a6cd85d7113d4521abb857774845c4291e6f6d010d97e3185bc799d83e3bb31501b3da786680df30fbc18eb41cbce611e8c0e9c72f69571ca10d3ef857d04d9c03ead7c6317d797a090fa1271ad9c7addfbcb412e9643d4fb33b1809c42623f474055fa9400a2027a7a885c8dfa4efe20666b4ee27d7529c134d7f28d53f175f6bf4b62faa2110d5b76f0f770c15e628181c1fcc18f970a9c34d24b2fc8c50ca9c07a7156ef4e5ff4bdf002eda0b11c1d359d0b59a54680704dbb9db631457879b27e0dfdbe50158fd9cf9b4cf77605c4ac4c95bd65fc9f6f9295a686647cb999090819cda700820c282c613cedcd218540bbc6f37b01c6567c4a1ea624f092a3a5cca2d6f0f0db231972fce627f0ecca0d
ee60f17551c5f8fdaeb5ab560b2ceb781cdb339361a0fbee1b9dffad59115138c8d6a70dda9ccc1bf0bbdd7fee15764845db875f6432559ff8dbc9055324431bc34e5b93d15da307317849eccd90c0c7b98870b9317c15a5959dcfb84c76dcc908c4fe6ba92126339bf06e458f6646df5e83ba7c3d35bc263b3222c8e9040068847749ca8e8f95045e4342aeb521eb3a5587ec268ed3aa6faf32b62b0bc41a9d549521f406fc3ec7d4dabb75e0d3e144d7cc882372d13746b6dcd481b1b229bcaec9f7422cdfb84e35c5d92171376cae5c86300822d729cd3a8479583bef09527027dba5f11263c5cbbeb3834b7a5c1cba9aa5fee0c95ec3f17a33ec3d8047fff799187f5ae2040bbe913c226c34c9fbe4389dd728984257a816892b3cae3e43191dd291f0eb50000000000000000420000000000000035000000000000000000000000000000060000000000000000100000000000001900000000000000480000000000001050edbc06b4bfc3ee108b66f7a8f772ca4d90e1a085f4a8398505920f7465bb44b4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d3021ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85e58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a193440eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968ffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f839867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756afcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0f9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5f8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf8923490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99cc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8beccda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d22733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981fe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0b46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0c65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2f44
18588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd95a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e3774df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652cdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618db8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea32293237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d7358448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a927ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757bf558bebd2ceec7f3c5dce04a4782f88c2c6036ae78ee206d0bc5289d20461a2e21908c2968c0699040a6fd866a577a99a9d2ec88745c815fd4a472c789244daae824d72ddc272aab68a8c3022e36f10454437c1886f3ff9927b64f232df414f27e429a4bef3083bc31a671d046ea5c1f5b8c3094d72868d9dfdc12c7334ac5f743cc5c365a9a6a15c1f240ac25880c7a9d1de290696cb766074a1d83d9278164adcf616c3bfabf63999a01966c998b7bb572774035a63ead49da73b5987f34775786645d0c5dd7c04a2f8a75dcae085213652f5bce3ea8b9b9bedd1cab3c5e9b88b152c9b8a7b79637d35911848b0c41e7cc7cca2ab4fe9a15f9c38bb4bb9390c4e2d8ce834ffd7a6cd85d7113d4521abb857774845c4291e6f6d010d97e3185bc799d83e3bb31501b3da786680df30fbc18eb41cbce611e8c0e9c72f69571ca10d3ef857d04d9c03ead7c6317d797a090fa1271ad9c7addfbcb412e9643d4fb33b1809c42623f474055fa9400a2027a7a885c8dfa4efe20666b4ee27d7529c134d7f28d53f175f6bf4b62faa2110d5b76f0f770c15e628181c1fcc18f970a9c34d24b2fc8c50ca9c07a7156ef4e5ff4bdf002eda0b11c1d359d0b59a54680704dbb9db631457879b27e0dfdbe50158fd9cf9b4cf77605c4ac4c95bd65fc9f6f9295a686647cb999090819cda700820c282c613cedcd218540bbc6f37b01c6567c4a1ea624f092a3a5cca2d6f0f0db231972fce627f0ecca0dee60f17551c5f8fdaeb5ab560b2ceb781cdb339361a0fbee1b9dffad59115138c8d6a70dda9ccc1bf0bbdd7fee15764845db875f6432559ff8dbc9055324431bc34e5b93d15da307317849eccd90c0c7
b98870b9317c15a5959dcfb84c76dcc908c4fe6ba92126339bf06e458f6646df5e83ba7c3d35bc263b3222c8e9040068847749ca8e8f95045e4342aeb521eb3a5587ec268ed3aa6faf32b62b0bc41a9d549521f406fc30f3e39c5412c30550d1d07fb07ff0e546fbeea1988f6658f04a9b19693e5b99d84e35c5d92171376cae5c86300822d729cd3a8479583bef09527027dba5f11263c5cbbeb3834b7a5c1cba9aa5fee0c95ec3f17a33ec3d8047fff799187f5ae2040bbe913c226c34c9fbe4389dd728984257a816892b3cae3e43191dd291f0eb5" -} diff --git a/op-challenger/game/fault/trace/asterisc/test_data/proofs/1.json b/op-challenger/game/fault/trace/asterisc/test_data/proofs/1.json deleted file mode 100644 index 311847daa5a05..0000000000000 --- a/op-challenger/game/fault/trace/asterisc/test_data/proofs/1.json +++ /dev/null @@ -1,2 +0,0 @@ -{} - diff --git a/op-challenger/game/fault/trace/asterisc/test_data/proofs/2.json b/op-challenger/game/fault/trace/asterisc/test_data/proofs/2.json deleted file mode 100644 index 96f58c8e8cb39..0000000000000 --- a/op-challenger/game/fault/trace/asterisc/test_data/proofs/2.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "foo": 0, - "bar": "0x71f9eb93ff904e5c03c3425228ef75766db0c906ad239df9a7a7f0d9c6a89705", - "step": 0, - "pre": "0x03abd5c535c08bae7c4ad48fcae39b65f9c25239f65b4376c58638d262c97381", - "post": "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", - "state-data": "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", - "proof-data": "0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd" -} diff --git a/op-challenger/game/fault/trace/asterisc/test_data/state.json b/op-challenger/game/fault/trace/asterisc/test_data/state.json deleted file mode 100644 index 00dfc2d666c84..0000000000000 --- a/op-challenger/game/fault/trace/asterisc/test_data/state.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "pc": 0, - "exited": false, - "step": 0, - "witness": 
"wOSi8Cm62dDmKt1OGwxlLrSznk6zE4ghp7evP1rfrXYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIGCAAAAAAAAAAAAAAAAB/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", - "stateHash": "0x03216fdc4a7bfd4c7160fa946d1bfe451d13ff32da496609fe18355282b910c6" -} diff --git a/op-challenger/game/fault/trace/outputs/output_asterisc.go b/op-challenger/game/fault/trace/outputs/output_asterisc.go deleted file mode 100644 index 10822a5a9aeb4..0000000000000 --- a/op-challenger/game/fault/trace/outputs/output_asterisc.go +++ /dev/null @@ -1,52 +0,0 @@ -package outputs - -import ( - "context" - "fmt" - "path/filepath" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/asterisc" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/split" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" - "github.com/ethereum-optimism/optimism/op-challenger/metrics" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -func NewOutputAsteriscTraceAccessor( - logger log.Logger, - m metrics.Metricer, - cfg vm.Config, - vmCfg vm.OracleServerExecutor, - l2Client utils.L2HeaderSource, - prestateProvider types.PrestateProvider, - asteriscPrestate string, - rollupClient OutputRollupClient, - dir string, - l1Head eth.BlockID, - splitDepth types.Depth, - prestateBlock uint64, - poststateBlock uint64, -) 
(*trace.Accessor, error) { - outputProvider := NewTraceProvider(logger, prestateProvider, rollupClient, l2Client, l1Head, splitDepth, prestateBlock, poststateBlock) - asteriscCreator := func(ctx context.Context, localContext common.Hash, depth types.Depth, agreed utils.Proposal, claimed utils.Proposal) (types.TraceProvider, error) { - logger := logger.New("pre", agreed.OutputRoot, "post", claimed.OutputRoot, "localContext", localContext) - subdir := filepath.Join(dir, localContext.Hex()) - localInputs, err := utils.FetchLocalInputsFromProposals(ctx, l1Head.Hash, l2Client, agreed, claimed) - if err != nil { - return nil, fmt.Errorf("failed to fetch asterisc local inputs: %w", err) - } - provider := asterisc.NewTraceProvider(logger, m.ToTypedVmMetrics(cfg.VmType.String()), cfg, vmCfg, prestateProvider, asteriscPrestate, localInputs, subdir, depth) - return provider, nil - } - - metricsLabel := fmt.Sprintf("outputs_%s_provider", cfg.VmType.String()) - cache := NewProviderCache(m, metricsLabel, asteriscCreator) - selector := split.NewSplitProviderSelector(outputProvider, splitDepth, OutputRootSplitAdapter(outputProvider, cache.GetOrCreate)) - return trace.NewAccessor(selector), nil -} diff --git a/op-challenger/game/fault/trace/outputs/provider.go b/op-challenger/game/fault/trace/outputs/provider.go index 410b7b15d02d0..2bacad8bcaadb 100644 --- a/op-challenger/game/fault/trace/outputs/provider.go +++ b/op-challenger/game/fault/trace/outputs/provider.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "math/big" + "time" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" @@ -114,7 +115,9 @@ func (o *OutputTraceProvider) GetL2BlockNumberChallenge(ctx context.Context) (*t if err != nil { return nil, err } - header, err := o.l2Client.HeaderByNumber(ctx, new(big.Int).SetUint64(outputBlock)) + tCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + header, err := 
o.l2Client.HeaderByNumber(tCtx, new(big.Int).SetUint64(outputBlock)) if err != nil { return nil, fmt.Errorf("failed to retrieve L2 block header %v: %w", outputBlock, err) } diff --git a/op-challenger/game/fault/trace/super/provider_supernode.go b/op-challenger/game/fault/trace/super/provider_supernode.go new file mode 100644 index 0000000000000..60315cefb0647 --- /dev/null +++ b/op-challenger/game/fault/trace/super/provider_supernode.go @@ -0,0 +1,166 @@ +package super + +import ( + "context" + "fmt" + "slices" + + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + types2 "github.com/ethereum-optimism/optimism/op-challenger/game/types" + interopTypes "github.com/ethereum-optimism/optimism/op-program/client/interop/types" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" +) + +type SuperNodeRootProvider interface { + SuperRootAtTimestamp(ctx context.Context, timestamp uint64) (eth.SuperRootAtTimestampResponse, error) +} + +type SuperNodeTraceProvider struct { + PreimagePrestateProvider + logger log.Logger + rootProvider SuperNodeRootProvider + prestateTimestamp uint64 + poststateTimestamp uint64 + l1Head eth.BlockID + gameDepth types.Depth +} + +func NewSuperNodeTraceProvider(logger log.Logger, prestateProvider PreimagePrestateProvider, rootProvider SuperNodeRootProvider, l1Head eth.BlockID, gameDepth types.Depth, prestateTimestamp, poststateTimestamp uint64) *SuperNodeTraceProvider { + return &SuperNodeTraceProvider{ + logger: logger, + PreimagePrestateProvider: prestateProvider, + rootProvider: rootProvider, + prestateTimestamp: prestateTimestamp, + poststateTimestamp: poststateTimestamp, + l1Head: l1Head, + gameDepth: gameDepth, + } +} + +func (s *SuperNodeTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) { + preimage, err := s.GetPreimageBytes(ctx, pos) + if err != nil { + return 
common.Hash{}, err + } + return crypto.Keccak256Hash(preimage), nil +} + +func (s *SuperNodeTraceProvider) getPreimageBytesAtTimestampBoundary(ctx context.Context, timestamp uint64) ([]byte, error) { + root, err := s.rootProvider.SuperRootAtTimestamp(ctx, timestamp) + if err != nil { + return nil, fmt.Errorf("failed to retrieve super root at timestamp %v: %w", timestamp, err) + } + if root.CurrentL1.Number < s.l1Head.Number { + // Node has not processed the game's L1 head so it is not safe to play until it syncs further. + return nil, types2.ErrNotInSync + } + if root.Data == nil { + // No block at this timestamp so it must be invalid + return InvalidTransition, nil + } + if root.Data.VerifiedRequiredL1.Number > s.l1Head.Number { + return InvalidTransition, nil + } + return root.Data.Super.Marshal(), nil +} + +func (s *SuperNodeTraceProvider) GetPreimageBytes(ctx context.Context, pos types.Position) ([]byte, error) { + // Find the timestamp and step at position + timestamp, step, err := s.ComputeStep(pos) + if err != nil { + return nil, err + } + s.logger.Trace("Getting claim", "pos", pos.ToGIndex(), "timestamp", timestamp, "step", step) + if step == 0 { + return s.getPreimageBytesAtTimestampBoundary(ctx, timestamp) + } + // Fetch the super root at the next timestamp since we are part way through the transition to it + prevRoot, err := s.rootProvider.SuperRootAtTimestamp(ctx, timestamp) + if err != nil { + return nil, fmt.Errorf("failed to retrieve previous super root at timestamp %v: %w", timestamp, err) + } + if prevRoot.CurrentL1.Number < s.l1Head.Number { + return nil, types2.ErrNotInSync + } + if prevRoot.Data == nil { + // No block at this timestamp so it must be invalid + return InvalidTransition, nil + } + if prevRoot.Data.VerifiedRequiredL1.Number > s.l1Head.Number { + // The previous root was not safe at the game L1 head so we must have already transitioned to the invalid hash + // prior to this step and it then repeats forever. 
+ return InvalidTransition, nil + } + nextTimestamp := timestamp + 1 + nextRoot, err := s.rootProvider.SuperRootAtTimestamp(ctx, nextTimestamp) + if err != nil { + return nil, fmt.Errorf("failed to retrieve next super root at timestamp %v: %w", nextTimestamp, err) + } + if nextRoot.CurrentL1.Number < s.l1Head.Number { + return nil, types2.ErrNotInSync + } + + prevSuper := prevRoot.Data.Super + expectedState := interopTypes.TransitionState{ + SuperRoot: prevSuper.Marshal(), + PendingProgress: make([]interopTypes.OptimisticBlock, 0, step), + Step: step, + } + + // Should already be sorted but be defensive and sort it ourselves + slices.SortFunc(nextRoot.ChainIDs, func(a, b eth.ChainID) int { + return a.Cmp(b) + }) + for i := uint64(0); i < min(step, uint64(len(nextRoot.ChainIDs))); i++ { + chainID := nextRoot.ChainIDs[i] + // Check if the chain's optimistic root was safe at the game's L1 head + optimistic, ok := nextRoot.OptimisticAtTimestamp[chainID] + if !ok { + // No block at this timestamp for a chain that needs to be processed at this step, so return invalid + return InvalidTransition, nil + } + if optimistic.RequiredL1.Number > s.l1Head.Number { + // Not enough data on L1 to derive the optimistic block, move to invalid transition. 
+ return InvalidTransition, nil + } + + expectedState.PendingProgress = append(expectedState.PendingProgress, interopTypes.OptimisticBlock{ + BlockHash: optimistic.Output.BlockRef.Hash, + OutputRoot: optimistic.Output.OutputRoot, + }) + } + return expectedState.Marshal(), nil +} + +func (s *SuperNodeTraceProvider) ComputeStep(pos types.Position) (timestamp uint64, step uint64, err error) { + bigIdx := pos.TraceIndex(s.gameDepth) + if !bigIdx.IsUint64() { + err = fmt.Errorf("%w: %v", ErrIndexTooBig, bigIdx) + return + } + + traceIdx := bigIdx.Uint64() + 1 + timestampIncrements := traceIdx / StepsPerTimestamp + timestamp = s.prestateTimestamp + timestampIncrements + if timestamp >= s.poststateTimestamp { // Apply trace extension once the claimed timestamp is reached + timestamp = s.poststateTimestamp + step = 0 + } else { + step = traceIdx % StepsPerTimestamp + } + return +} + +func (s *SuperNodeTraceProvider) GetStepData(_ context.Context, _ types.Position) (prestate []byte, proofData []byte, preimageData *types.PreimageOracleData, err error) { + return nil, nil, nil, ErrGetStepData +} + +func (s *SuperNodeTraceProvider) GetL2BlockNumberChallenge(_ context.Context) (*types.InvalidL2BlockNumberChallenge, error) { + // Never need to challenge L2 block number for super root games. 
+ return nil, types.ErrL2BlockNumberValid +} + +var _ types.TraceProvider = (*SuperNodeTraceProvider)(nil) diff --git a/op-challenger/game/fault/trace/super/provider_supernode_test.go b/op-challenger/game/fault/trace/super/provider_supernode_test.go new file mode 100644 index 0000000000000..3c526b528190f --- /dev/null +++ b/op-challenger/game/fault/trace/super/provider_supernode_test.go @@ -0,0 +1,556 @@ +package super + +import ( + "context" + "fmt" + "math/big" + "math/rand" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + types2 "github.com/ethereum-optimism/optimism/op-challenger/game/types" + interopTypes "github.com/ethereum-optimism/optimism/op-program/client/interop/types" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestSuperNodeProvider_Get(t *testing.T) { + t.Run("AtPostState", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + expectedSuper := eth.NewSuperV1(poststateTimestamp, eth.ChainIDAndOutput{ + ChainID: eth.ChainIDFromUInt64(1), + Output: eth.Bytes32{0xbb}, + }) + response := eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: []eth.ChainID{eth.ChainIDFromUInt64(1), eth.ChainIDFromUInt64(2)}, + Data: ð.SuperRootResponseData{ + VerifiedRequiredL1: l1Head, + Super: expectedSuper, + SuperRoot: eth.SuperRoot(expectedSuper), + }, + } + stubSuperNode.Add(response) + claim, err := provider.Get(context.Background(), types.RootPosition) + require.NoError(t, err) + require.Equal(t, common.Hash(eth.SuperRoot(expectedSuper)), claim) + }) + + t.Run("AtNewTimestamp", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + expectedSuper := eth.NewSuperV1(prestateTimestamp+1, 
eth.ChainIDAndOutput{ + ChainID: eth.ChainIDFromUInt64(1), + Output: eth.Bytes32{0xbb}, + }) + response := eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: []eth.ChainID{eth.ChainIDFromUInt64(1), eth.ChainIDFromUInt64(2)}, + Data: ð.SuperRootResponseData{ + VerifiedRequiredL1: l1Head, + Super: expectedSuper, + SuperRoot: eth.SuperRoot(expectedSuper), + }, + } + stubSuperNode.Add(response) + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(StepsPerTimestamp-1))) + require.NoError(t, err) + require.Equal(t, common.Hash(eth.SuperRoot(expectedSuper)), claim) + }) + + t.Run("ValidTransitionBetweenFirstTwoSuperRoots", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + prev, next := createValidSuperNodeSuperRoots(l1Head) + stubSuperNode.Add(prev) + stubSuperNode.Add(next) + + expectSuperNodeValidTransition(t, provider, prev, next) + }) + + t.Run("Step0SuperRootIsSafeBeforeGameL1Head", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + expectedSuper := eth.NewSuperV1(poststateTimestamp, eth.ChainIDAndOutput{ + ChainID: eth.ChainIDFromUInt64(1), + Output: eth.Bytes32{0xbb}, + }) + response := eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: []eth.ChainID{eth.ChainIDFromUInt64(1), eth.ChainIDFromUInt64(2)}, + Data: ð.SuperRootResponseData{ + VerifiedRequiredL1: eth.BlockID{Number: l1Head.Number - 10, Hash: common.Hash{0xcc}}, + Super: expectedSuper, + SuperRoot: eth.SuperRoot(expectedSuper), + }, + } + stubSuperNode.Add(response) + claim, err := provider.Get(context.Background(), types.RootPosition) + require.NoError(t, err) + require.Equal(t, common.Hash(eth.SuperRoot(expectedSuper)), claim) + }) + + t.Run("Step0SuperRootNotSafeAtGameL1Head", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + expectedSuper := eth.NewSuperV1(poststateTimestamp, eth.ChainIDAndOutput{ + ChainID: 
eth.ChainIDFromUInt64(1), + Output: eth.Bytes32{0xbb}, + }) + response := eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: []eth.ChainID{eth.ChainIDFromUInt64(1), eth.ChainIDFromUInt64(2)}, + Data: ð.SuperRootResponseData{ + VerifiedRequiredL1: eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xcc}}, + Super: expectedSuper, + SuperRoot: eth.SuperRoot(expectedSuper), + }, + } + stubSuperNode.Add(response) + claim, err := provider.Get(context.Background(), types.RootPosition) + require.NoError(t, err) + require.Equal(t, InvalidTransitionHash, claim) + }) + + t.Run("NextSuperRootSafeBeforeGameL1Head", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + prev, next := createValidSuperNodeSuperRoots(l1Head) + // Make super roots be safe earlier + prev.Data.VerifiedRequiredL1 = eth.BlockID{Number: l1Head.Number - 10, Hash: common.Hash{0xaa}} + next.Data.VerifiedRequiredL1 = eth.BlockID{Number: l1Head.Number - 5, Hash: common.Hash{0xbb}} + stubSuperNode.Add(prev) + stubSuperNode.Add(next) + expectSuperNodeValidTransition(t, provider, prev, next) + }) + + t.Run("PreviousSuperRootNotSafeAtGameL1Head", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + prev, next := createValidSuperNodeSuperRoots(l1Head) + // Make super roots be safe only after L1 head + prev.Data.VerifiedRequiredL1 = eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xaa}} + next.Data.VerifiedRequiredL1 = eth.BlockID{Number: l1Head.Number + 2, Hash: common.Hash{0xbb}} + stubSuperNode.Add(prev) + stubSuperNode.Add(next) + + // All steps should be the invalid transition hash. 
+ for i := int64(0); i < StepsPerTimestamp+1; i++ { + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(i))) + require.NoError(t, err) + require.Equalf(t, InvalidTransitionHash, claim, "incorrect claim at index %d", i) + } + }) + + t.Run("FirstChainUnsafe", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + prev, next := createValidSuperNodeSuperRoots(l1Head) + // Make super roots be safe only after L1 head + prev.Data.VerifiedRequiredL1 = eth.BlockID{Number: l1Head.Number, Hash: common.Hash{0xaa}} + next.Data.VerifiedRequiredL1 = eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xbb}} + next.OptimisticAtTimestamp[eth.ChainIDFromUInt64(1)] = eth.OutputWithRequiredL1{ + Output: ð.OutputResponse{ + OutputRoot: eth.Bytes32{0xad}, + BlockRef: eth.L2BlockRef{Hash: common.Hash{0xcd}}, + WithdrawalStorageRoot: common.Hash{0xde}, + StateRoot: common.Hash{0xdf}, + }, + RequiredL1: eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xbb}}, + } + stubSuperNode.Add(prev) + stubSuperNode.Add(next) + + // All steps should be the invalid transition hash. 
+ for i := int64(0); i < StepsPerTimestamp+1; i++ { + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(i))) + require.NoError(t, err) + require.Equalf(t, InvalidTransitionHash, claim, "incorrect claim at index %d", i) + } + }) + + t.Run("SecondChainUnsafe", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + prev, next := createValidSuperNodeSuperRoots(l1Head) + // Make super roots be safe only after L1 head + prev.Data.VerifiedRequiredL1 = eth.BlockID{Number: l1Head.Number, Hash: common.Hash{0xaa}} + next.Data.VerifiedRequiredL1 = eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xbb}} + next.OptimisticAtTimestamp[eth.ChainIDFromUInt64(2)] = eth.OutputWithRequiredL1{ + Output: ð.OutputResponse{ + OutputRoot: eth.Bytes32{0xad}, + BlockRef: eth.L2BlockRef{Hash: common.Hash{0xcd}}, + WithdrawalStorageRoot: common.Hash{0xde}, + StateRoot: common.Hash{0xdf}, + }, + RequiredL1: eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xbb}}, + } + stubSuperNode.Add(prev) + stubSuperNode.Add(next) + + // First step should be valid because we can reach the required block on chain 1 + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(0))) + require.NoError(t, err) + require.NotEqual(t, InvalidTransitionHash, claim, "incorrect claim at index 0") + + // Remaining steps should be the invalid transition hash. 
+ for i := int64(1); i < StepsPerTimestamp+1; i++ { + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(i))) + require.NoError(t, err) + require.Equalf(t, InvalidTransitionHash, claim, "incorrect claim at index %d", i) + } + }) + + t.Run("Step0ForTimestampBeyondChainHead", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + stubSuperNode.AddAtTimestamp(poststateTimestamp, eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: []eth.ChainID{eth.ChainIDFromUInt64(1), eth.ChainIDFromUInt64(2)}, + Data: nil, + }) + + claim, err := provider.Get(context.Background(), types.RootPosition) + require.NoError(t, err) + require.Equal(t, InvalidTransitionHash, claim) + }) + + t.Run("NextSuperRootTimestampBeyondAllChainHeads", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + prev, _ := createValidSuperNodeSuperRoots(l1Head) + stubSuperNode.Add(prev) + stubSuperNode.AddAtTimestamp(prestateTimestamp+1, eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: prev.ChainIDs, + Data: nil, + }) + + // All steps should be the invalid transition hash as there are no chains with optimistic blocks + for i := int64(0); i < StepsPerTimestamp+1; i++ { + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(i))) + require.NoError(t, err) + require.Equalf(t, InvalidTransitionHash, claim, "incorrect claim at index %d", i) + } + }) + + t.Run("NextSuperRootTimestampBeyondFirstChainHead", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + prev, next := createValidSuperNodeSuperRoots(l1Head) + stubSuperNode.Add(prev) + stubSuperNode.AddAtTimestamp(prestateTimestamp+1, eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: prev.ChainIDs, + OptimisticAtTimestamp: map[eth.ChainID]eth.OutputWithRequiredL1{ + eth.ChainIDFromUInt64(2): next.OptimisticAtTimestamp[eth.ChainIDFromUInt64(2)], + 
}, + Data: nil, + }) + // All steps should be the invalid transition hash because the first chain is invalid. + for i := int64(0); i < StepsPerTimestamp+1; i++ { + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(i))) + require.NoError(t, err) + require.Equalf(t, InvalidTransitionHash, claim, "incorrect claim at index %d", i) + } + }) + + t.Run("NextSuperRootTimestampBeyondSecondChainHead", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + prev, next := createValidSuperNodeSuperRoots(l1Head) + stubSuperNode.Add(prev) + stubSuperNode.AddAtTimestamp(prestateTimestamp+1, eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: next.ChainIDs, + OptimisticAtTimestamp: map[eth.ChainID]eth.OutputWithRequiredL1{ + eth.ChainIDFromUInt64(1): next.OptimisticAtTimestamp[eth.ChainIDFromUInt64(1)], + }, + Data: nil, + }) + // First step should be valid because we can reach the required block on chain 1 + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(0))) + require.NoError(t, err) + require.NotEqual(t, InvalidTransitionHash, claim, "incorrect claim at index 0") + + // All remaining steps should be the invalid transition hash because the second chain is invalid. 
+ for i := int64(1); i < StepsPerTimestamp+1; i++ { + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(i))) + require.NoError(t, err) + require.Equalf(t, InvalidTransitionHash, claim, "incorrect claim at index %d", i) + } + }) + + t.Run("PreviousSuperRootTimestampBeyondChainHead", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + stubSuperNode.AddAtTimestamp(prestateTimestamp, eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: []eth.ChainID{eth.ChainIDFromUInt64(1), eth.ChainIDFromUInt64(2)}, + Data: nil, + }) + stubSuperNode.AddAtTimestamp(prestateTimestamp+1, eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: []eth.ChainID{eth.ChainIDFromUInt64(1), eth.ChainIDFromUInt64(2)}, + Data: nil, + }) + + // All steps should be the invalid transition hash. + for i := int64(0); i < StepsPerTimestamp+1; i++ { + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(i))) + require.NoError(t, err) + require.Equalf(t, InvalidTransitionHash, claim, "incorrect claim at index %d", i) + } + }) + + t.Run("Step0NotInSync", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + expectedSuper := eth.NewSuperV1(poststateTimestamp, eth.ChainIDAndOutput{ + ChainID: eth.ChainIDFromUInt64(1), + Output: eth.Bytes32{0xbb}, + }) + response := eth.SuperRootAtTimestampResponse{ + CurrentL1: eth.BlockID{Number: l1Head.Number - 1, Hash: common.Hash{0xaa}}, + ChainIDs: []eth.ChainID{eth.ChainIDFromUInt64(1), eth.ChainIDFromUInt64(2)}, + Data: ð.SuperRootResponseData{ + VerifiedRequiredL1: eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xcc}}, + Super: expectedSuper, + SuperRoot: eth.SuperRoot(expectedSuper), + }, + } + stubSuperNode.Add(response) + _, err := provider.Get(context.Background(), types.RootPosition) + require.ErrorIs(t, err, types2.ErrNotInSync) + }) + + t.Run("PreviousSuperRootNotInSync", func(t 
*testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + stubSuperNode.AddAtTimestamp(prestateTimestamp, eth.SuperRootAtTimestampResponse{ + CurrentL1: eth.BlockID{Number: l1Head.Number - 1, Hash: common.Hash{0xaa}}, + ChainIDs: []eth.ChainID{eth.ChainIDFromUInt64(1), eth.ChainIDFromUInt64(2)}, + }) + _, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(1))) + require.ErrorIs(t, err, types2.ErrNotInSync) + }) + + t.Run("NextSuperRootNotInSync", func(t *testing.T) { + provider, stubSuperNode, l1Head := createSuperNodeProvider(t) + prev, _ := createValidSuperNodeSuperRoots(l1Head) + // Previous gives an in sync response + stubSuperNode.Add(prev) + // But next gives an out of sync response + stubSuperNode.AddAtTimestamp(prestateTimestamp+1, eth.SuperRootAtTimestampResponse{ + CurrentL1: eth.BlockID{Number: l1Head.Number - 1, Hash: common.Hash{0xaa}}, + ChainIDs: []eth.ChainID{eth.ChainIDFromUInt64(1), eth.ChainIDFromUInt64(2)}, + }) + _, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(1))) + require.ErrorIs(t, err, types2.ErrNotInSync) + }) +} + +func TestSuperNodeProvider_ComputeStep(t *testing.T) { + t.Run("ErrorWhenTraceIndexTooBig", func(t *testing.T) { + // Uses a big game depth so the trace index doesn't fit in uint64 + provider := NewSuperNodeTraceProvider(testlog.Logger(t, log.LvlInfo), nil, &stubSuperNodeRootProvider{}, eth.BlockID{}, 65, prestateTimestamp, poststateTimestamp) + // Left-most position in top game + _, _, err := provider.ComputeStep(types.RootPosition) + require.ErrorIs(t, err, ErrIndexTooBig) + }) + + t.Run("FirstTimestampSteps", func(t *testing.T) { + provider, _, _ := createSuperNodeProvider(t) + for i := int64(0); i < StepsPerTimestamp-1; i++ { + timestamp, step, err := provider.ComputeStep(types.NewPosition(gameDepth, big.NewInt(i))) + require.NoError(t, err) + // The prestate must be a super root and is on the timestamp boundary. 
+ // So the first step has the same timestamp and increments step from 0 to 1. + require.Equalf(t, prestateTimestamp, timestamp, "Incorrect timestamp at trace index %d", i) + require.Equalf(t, uint64(i+1), step, "Incorrect step at trace index %d", i) + } + }) + + t.Run("SecondTimestampSteps", func(t *testing.T) { + provider, _, _ := createSuperNodeProvider(t) + for i := int64(-1); i < StepsPerTimestamp-1; i++ { + traceIndex := StepsPerTimestamp + i + timestamp, step, err := provider.ComputeStep(types.NewPosition(gameDepth, big.NewInt(traceIndex))) + require.NoError(t, err) + // We should now be iterating through the steps of the second timestamp - 1s after the prestate + require.Equalf(t, prestateTimestamp+1, timestamp, "Incorrect timestamp at trace index %d", traceIndex) + require.Equalf(t, uint64(i+1), step, "Incorrect step at trace index %d", traceIndex) + } + }) + + t.Run("LimitToPoststateTimestamp", func(t *testing.T) { + provider, _, _ := createSuperNodeProvider(t) + timestamp, step, err := provider.ComputeStep(types.RootPosition) + require.NoError(t, err) + require.Equal(t, poststateTimestamp, timestamp, "Incorrect timestamp at root position") + require.Equal(t, uint64(0), step, "Incorrect step at trace index at root position") + }) + + t.Run("StepShouldLoopBackToZero", func(t *testing.T) { + provider, _, _ := createSuperNodeProvider(t) + prevTimestamp := prestateTimestamp + prevStep := uint64(0) // Absolute prestate is always on a timestamp boundary, so step 0 + for traceIndex := int64(0); traceIndex < 5*StepsPerTimestamp; traceIndex++ { + timestamp, step, err := provider.ComputeStep(types.NewPosition(gameDepth, big.NewInt(traceIndex))) + require.NoError(t, err) + if timestamp == prevTimestamp { + require.Equal(t, prevStep+1, step, "Incorrect step at trace index %d", traceIndex) + } else { + require.Equal(t, prevTimestamp+1, timestamp, "Incorrect timestamp at trace index %d", traceIndex) + require.Zero(t, step, "Incorrect step at trace index %d", 
traceIndex) + require.Equal(t, uint64(StepsPerTimestamp-1), prevStep, "Should only loop back to step 0 after the consolidation step") + } + prevTimestamp = timestamp + prevStep = step + } + }) +} + +func TestSuperNodeProvider_GetStepDataReturnsError(t *testing.T) { + provider, _, _ := createSuperNodeProvider(t) + _, _, _, err := provider.GetStepData(context.Background(), types.RootPosition) + require.ErrorIs(t, err, ErrGetStepData) +} + +func TestSuperNodeProvider_GetL2BlockNumberChallengeReturnsError(t *testing.T) { + provider, _, _ := createSuperNodeProvider(t) + _, err := provider.GetL2BlockNumberChallenge(context.Background()) + require.ErrorIs(t, err, types.ErrL2BlockNumberValid) +} + +func createSuperNodeProvider(t *testing.T) (*SuperNodeTraceProvider, *stubSuperNodeRootProvider, eth.BlockID) { + logger := testlog.Logger(t, log.LvlInfo) + l1Head := eth.BlockID{Number: 23542, Hash: common.Hash{0xab, 0xcd}} + stubSuperNode := &stubSuperNodeRootProvider{ + rootsByTimestamp: make(map[uint64]eth.SuperRootAtTimestampResponse), + } + provider := NewSuperNodeTraceProvider(logger, nil, stubSuperNode, l1Head, gameDepth, prestateTimestamp, poststateTimestamp) + return provider, stubSuperNode, l1Head +} + +func toOutputResponse(output *eth.OutputV0) *eth.OutputResponse { + return ð.OutputResponse{ + Version: output.Version(), + OutputRoot: eth.OutputRoot(output), + BlockRef: eth.L2BlockRef{ + Hash: output.BlockHash, + }, + WithdrawalStorageRoot: common.Hash(output.MessagePasserStorageRoot), + StateRoot: common.Hash(output.StateRoot), + } +} + +func createValidSuperNodeSuperRoots(l1Head eth.BlockID) (eth.SuperRootAtTimestampResponse, eth.SuperRootAtTimestampResponse) { + rng := rand.New(rand.NewSource(1)) + outputA1 := testutils.RandomOutputV0(rng) + outputA2 := testutils.RandomOutputV0(rng) + outputB1 := testutils.RandomOutputV0(rng) + outputB2 := testutils.RandomOutputV0(rng) + chainID1 := eth.ChainIDFromUInt64(1) + chainID2 := eth.ChainIDFromUInt64(2) + prevSuper := 
eth.NewSuperV1( + prestateTimestamp, + eth.ChainIDAndOutput{ChainID: chainID1, Output: eth.OutputRoot(outputA1)}, + eth.ChainIDAndOutput{ChainID: chainID2, Output: eth.OutputRoot(outputB1)}) + nextSuper := eth.NewSuperV1(prestateTimestamp+1, + eth.ChainIDAndOutput{ChainID: chainID1, Output: eth.OutputRoot(outputA2)}, + eth.ChainIDAndOutput{ChainID: chainID2, Output: eth.OutputRoot(outputB2)}) + + prevResponse := eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: []eth.ChainID{chainID1, chainID2}, + OptimisticAtTimestamp: map[eth.ChainID]eth.OutputWithRequiredL1{ + chainID1: { + Output: toOutputResponse(outputA1), + RequiredL1: l1Head, + }, + chainID2: { + Output: toOutputResponse(outputB1), + RequiredL1: l1Head, + }, + }, + Data: ð.SuperRootResponseData{ + VerifiedRequiredL1: l1Head, + Super: prevSuper, + SuperRoot: eth.SuperRoot(prevSuper), + }, + } + nextResponse := eth.SuperRootAtTimestampResponse{ + CurrentL1: l1Head, + ChainIDs: []eth.ChainID{chainID1, chainID2}, + OptimisticAtTimestamp: map[eth.ChainID]eth.OutputWithRequiredL1{ + chainID1: { + Output: toOutputResponse(outputA2), + RequiredL1: l1Head, + }, + chainID2: { + Output: toOutputResponse(outputB2), + RequiredL1: l1Head, + }, + }, + Data: ð.SuperRootResponseData{ + VerifiedRequiredL1: l1Head, + Super: nextSuper, + SuperRoot: eth.SuperRoot(nextSuper), + }, + } + return prevResponse, nextResponse +} + +func expectSuperNodeValidTransition(t *testing.T, provider *SuperNodeTraceProvider, prev eth.SuperRootAtTimestampResponse, next eth.SuperRootAtTimestampResponse) { + chain1OptimisticBlock := interopTypes.OptimisticBlock{ + BlockHash: next.OptimisticAtTimestamp[eth.ChainIDFromUInt64(1)].Output.BlockRef.Hash, + OutputRoot: next.OptimisticAtTimestamp[eth.ChainIDFromUInt64(1)].Output.OutputRoot, + } + chain2OptimisticBlock := interopTypes.OptimisticBlock{ + BlockHash: next.OptimisticAtTimestamp[eth.ChainIDFromUInt64(2)].Output.BlockRef.Hash, + OutputRoot: 
next.OptimisticAtTimestamp[eth.ChainIDFromUInt64(2)].Output.OutputRoot, + } + expectedFirstStep := &interopTypes.TransitionState{ + SuperRoot: prev.Data.Super.Marshal(), + PendingProgress: []interopTypes.OptimisticBlock{chain1OptimisticBlock}, + Step: 1, + } + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(0))) + require.NoError(t, err) + require.Equal(t, expectedFirstStep.Hash(), claim) + + expectedSecondStep := &interopTypes.TransitionState{ + SuperRoot: prev.Data.Super.Marshal(), + PendingProgress: []interopTypes.OptimisticBlock{chain1OptimisticBlock, chain2OptimisticBlock}, + Step: 2, + } + claim, err = provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(1))) + require.NoError(t, err) + require.Equal(t, expectedSecondStep.Hash(), claim) + + for step := uint64(3); step < StepsPerTimestamp; step++ { + expectedPaddingStep := &interopTypes.TransitionState{ + SuperRoot: prev.Data.Super.Marshal(), + PendingProgress: []interopTypes.OptimisticBlock{chain1OptimisticBlock, chain2OptimisticBlock}, + Step: step, + } + claim, err = provider.Get(context.Background(), types.NewPosition(gameDepth, new(big.Int).SetUint64(step-1))) + require.NoError(t, err) + require.Equalf(t, expectedPaddingStep.Hash(), claim, "incorrect hash at step %v", step) + } +} + +type stubSuperNodeRootProvider struct { + rootsByTimestamp map[uint64]eth.SuperRootAtTimestampResponse +} + +func (s *stubSuperNodeRootProvider) Add(root eth.SuperRootAtTimestampResponse) { + superV1 := root.Data.Super.(*eth.SuperV1) + s.AddAtTimestamp(superV1.Timestamp, root) +} + +func (s *stubSuperNodeRootProvider) AddAtTimestamp(timestamp uint64, root eth.SuperRootAtTimestampResponse) { + if s.rootsByTimestamp == nil { + s.rootsByTimestamp = make(map[uint64]eth.SuperRootAtTimestampResponse) + } + s.rootsByTimestamp[timestamp] = root +} + +func (s *stubSuperNodeRootProvider) SuperRootAtTimestamp(_ context.Context, timestamp uint64) 
(eth.SuperRootAtTimestampResponse, error) { + root, ok := s.rootsByTimestamp[timestamp] + if !ok { + // This is not the not found response - the test just didn't configure a response, so return a generic error + return eth.SuperRootAtTimestampResponse{}, fmt.Errorf("wowsers, now response for timestamp %v", timestamp) + } + return root, nil +} diff --git a/op-challenger/game/fault/trace/super/provider.go b/op-challenger/game/fault/trace/super/provider_supervisor.go similarity index 76% rename from op-challenger/game/fault/trace/super/provider.go rename to op-challenger/game/fault/trace/super/provider_supervisor.go index a3587ed1f5571..3d75b3f7ae53c 100644 --- a/op-challenger/game/fault/trace/super/provider.go +++ b/op-challenger/game/fault/trace/super/provider_supervisor.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" interopTypes "github.com/ethereum-optimism/optimism/op-program/client/interop/types" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -36,7 +37,12 @@ type RootProvider interface { AllSafeDerivedAt(ctx context.Context, derivedFrom eth.BlockID) (map[eth.ChainID]eth.BlockID, error) } -type SuperTraceProvider struct { +type SuperTraceProvider interface { + types.TraceProvider + PreimageTraceProvider +} + +type SupervisorSuperTraceProvider struct { PreimagePrestateProvider logger log.Logger rollupCfgs *RollupConfigs @@ -47,8 +53,18 @@ type SuperTraceProvider struct { gameDepth types.Depth } -func NewSuperTraceProvider(logger log.Logger, rollupCfgs *RollupConfigs, prestateProvider PreimagePrestateProvider, rootProvider RootProvider, l1Head eth.BlockID, gameDepth types.Depth, prestateTimestamp, poststateTimestamp uint64) *SuperTraceProvider { - return &SuperTraceProvider{ +func NewSuperTraceProvider(logger log.Logger, 
rollupCfgs *RollupConfigs, prestateProvider PreimagePrestateProvider, rootProvider *sources.SupervisorClient, superNodeProvider *sources.SuperNodeClient, l1Head eth.BlockID, gameDepth types.Depth, prestateTimestamp, poststateTimestamp uint64) SuperTraceProvider { + if rootProvider == nil && superNodeProvider != nil { + return NewSuperNodeTraceProvider(logger, prestateProvider, superNodeProvider, l1Head, gameDepth, prestateTimestamp, poststateTimestamp) + } else if rootProvider != nil && superNodeProvider == nil { + return NewSupervisorSuperTraceProvider(logger, rollupCfgs, prestateProvider, rootProvider, l1Head, gameDepth, prestateTimestamp, poststateTimestamp) + } else { + panic(fmt.Sprintf("Invalid configuration: must provide either a super node provider or a root provider, but not both. Root provider: %v, SuperNodeProvider: %v", rootProvider, superNodeProvider)) + } +} + +func NewSupervisorSuperTraceProvider(logger log.Logger, rollupCfgs *RollupConfigs, prestateProvider PreimagePrestateProvider, rootProvider RootProvider, l1Head eth.BlockID, gameDepth types.Depth, prestateTimestamp, poststateTimestamp uint64) *SupervisorSuperTraceProvider { + return &SupervisorSuperTraceProvider{ logger: logger, rollupCfgs: rollupCfgs, PreimagePrestateProvider: prestateProvider, @@ -60,7 +76,7 @@ func NewSuperTraceProvider(logger log.Logger, rollupCfgs *RollupConfigs, prestat } } -func (s *SuperTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) { +func (s *SupervisorSuperTraceProvider) Get(ctx context.Context, pos types.Position) (common.Hash, error) { preimage, err := s.GetPreimageBytes(ctx, pos) if err != nil { return common.Hash{}, err @@ -68,7 +84,7 @@ func (s *SuperTraceProvider) Get(ctx context.Context, pos types.Position) (commo return crypto.Keccak256Hash(preimage), nil } -func (s *SuperTraceProvider) GetPreimageBytes(ctx context.Context, pos types.Position) ([]byte, error) { +func (s *SupervisorSuperTraceProvider) GetPreimageBytes(ctx 
context.Context, pos types.Position) ([]byte, error) { // Find the timestamp and step at position timestamp, step, err := s.ComputeStep(pos) if err != nil { @@ -165,7 +181,7 @@ func (s *SuperTraceProvider) GetPreimageBytes(ctx context.Context, pos types.Pos return expectedState.Marshal(), nil } -func (s *SuperTraceProvider) ComputeStep(pos types.Position) (timestamp uint64, step uint64, err error) { +func (s *SupervisorSuperTraceProvider) ComputeStep(pos types.Position) (timestamp uint64, step uint64, err error) { bigIdx := pos.TraceIndex(s.gameDepth) if !bigIdx.IsUint64() { err = fmt.Errorf("%w: %v", ErrIndexTooBig, bigIdx) @@ -184,13 +200,14 @@ func (s *SuperTraceProvider) ComputeStep(pos types.Position) (timestamp uint64, return } -func (s *SuperTraceProvider) GetStepData(_ context.Context, _ types.Position) (prestate []byte, proofData []byte, preimageData *types.PreimageOracleData, err error) { +func (s *SupervisorSuperTraceProvider) GetStepData(_ context.Context, _ types.Position) (prestate []byte, proofData []byte, preimageData *types.PreimageOracleData, err error) { return nil, nil, nil, ErrGetStepData } -func (s *SuperTraceProvider) GetL2BlockNumberChallenge(_ context.Context) (*types.InvalidL2BlockNumberChallenge, error) { +func (s *SupervisorSuperTraceProvider) GetL2BlockNumberChallenge(_ context.Context) (*types.InvalidL2BlockNumberChallenge, error) { // Never need to challenge L2 block number for super root games. 
return nil, types.ErrL2BlockNumberValid } -var _ types.TraceProvider = (*SuperTraceProvider)(nil) +var _ types.TraceProvider = (*SupervisorSuperTraceProvider)(nil) +var _ SuperTraceProvider = (*SupervisorSuperTraceProvider)(nil) diff --git a/op-challenger/game/fault/trace/super/provider_test.go b/op-challenger/game/fault/trace/super/provider_supervisor_test.go similarity index 97% rename from op-challenger/game/fault/trace/super/provider_test.go rename to op-challenger/game/fault/trace/super/provider_supervisor_test.go index 4fd59c9716abb..0da4a56af12f9 100644 --- a/op-challenger/game/fault/trace/super/provider_test.go +++ b/op-challenger/game/fault/trace/super/provider_supervisor_test.go @@ -268,7 +268,7 @@ func TestComputeStep(t *testing.T) { rollupCfgs, err := NewRollupConfigs(vm.Config{}) require.NoError(t, err) // Uses a big game depth so the trace index doesn't fit in uint64 - provider := NewSuperTraceProvider(testlog.Logger(t, log.LvlInfo), rollupCfgs, nil, &stubRootProvider{}, eth.BlockID{}, 65, prestateTimestamp, poststateTimestamp) + provider := NewSupervisorSuperTraceProvider(testlog.Logger(t, log.LvlInfo), rollupCfgs, nil, &stubRootProvider{}, eth.BlockID{}, 65, prestateTimestamp, poststateTimestamp) // Left-most position in top game _, _, err = provider.ComputeStep(types.RootPosition) require.ErrorIs(t, err, ErrIndexTooBig) @@ -326,7 +326,7 @@ func TestComputeStep(t *testing.T) { }) } -func createProvider(t *testing.T) (*SuperTraceProvider, *stubRootProvider, eth.BlockID, *RollupConfigs) { +func createProvider(t *testing.T) (*SupervisorSuperTraceProvider, *stubRootProvider, eth.BlockID, *RollupConfigs) { logger := testlog.Logger(t, log.LvlInfo) l1Head := eth.BlockID{Number: 23542, Hash: common.Hash{0xab, 0xcd}} stubSupervisor := &stubRootProvider{ @@ -348,7 +348,7 @@ func createProvider(t *testing.T) (*SuperTraceProvider, *stubRootProvider, eth.B } rollupCfgs, err := NewRollupConfigsFromParsed(chain1Cfg, chain2Cfg) require.NoError(t, err) - provider := 
NewSuperTraceProvider(logger, rollupCfgs, nil, stubSupervisor, l1Head, gameDepth, prestateTimestamp, poststateTimestamp) + provider := NewSupervisorSuperTraceProvider(logger, rollupCfgs, nil, stubSupervisor, l1Head, gameDepth, prestateTimestamp, poststateTimestamp) return provider, stubSupervisor, l1Head, rollupCfgs } @@ -423,7 +423,7 @@ func createValidSuperRoots(l1Head eth.BlockID) (superRootData, superRootData) { return prev, next } -func expectValidTransition(t *testing.T, provider *SuperTraceProvider, prev superRootData, next superRootData) { +func expectValidTransition(t *testing.T, provider *SupervisorSuperTraceProvider, prev superRootData, next superRootData) { expectedFirstStep := &interopTypes.TransitionState{ SuperRoot: prev.super.Marshal(), PendingProgress: []interopTypes.OptimisticBlock{ diff --git a/op-challenger/game/fault/trace/super/split_adapter_test.go b/op-challenger/game/fault/trace/super/split_adapter_test.go index 7d1f4ffa35fb3..1808dddd657f5 100644 --- a/op-challenger/game/fault/trace/super/split_adapter_test.go +++ b/op-challenger/game/fault/trace/super/split_adapter_test.go @@ -123,7 +123,7 @@ func setupSplitAdapterTest(t *testing.T, depth types.Depth, prestateTimestamp ui creator := &capturingCreator{} rootProvider := &stubRootProvider{} prestateProvider := NewSuperRootPrestateProvider(rootProvider, prestateTimestamp) - traceProvider := NewSuperTraceProvider(testlog.Logger(t, log.LvlInfo), nil, prestateProvider, rootProvider, eth.BlockID{}, depth, prestateTimestamp, poststateTimestamp) + traceProvider := NewSupervisorSuperTraceProvider(testlog.Logger(t, log.LvlInfo), nil, prestateProvider, rootProvider, eth.BlockID{}, depth, prestateTimestamp, poststateTimestamp) adapter := SuperRootSplitAdapter(traceProvider, creator.Create) return creator, rootProvider, adapter } diff --git a/op-challenger/game/fault/trace/super/super_asterisc_kona.go b/op-challenger/game/fault/trace/super/super_asterisc_kona.go deleted file mode 100644 index 
259f4c4ff66ae..0000000000000 --- a/op-challenger/game/fault/trace/super/super_asterisc_kona.go +++ /dev/null @@ -1,57 +0,0 @@ -package super - -import ( - "context" - "fmt" - "math/big" - "path/filepath" - - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/asterisc" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/split" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" - "github.com/ethereum-optimism/optimism/op-challenger/metrics" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" -) - -func NewSuperAsteriscKonaTraceAccessor( - logger log.Logger, - m metrics.Metricer, - cfg vm.Config, - serverExecutor vm.OracleServerExecutor, - prestateProvider PreimagePrestateProvider, - rootProvider RootProvider, - asteriscPrestate string, - dir string, - l1Head eth.BlockID, - splitDepth types.Depth, - prestateTimestamp uint64, - poststateTimestamp uint64, -) (*trace.Accessor, error) { - rollupCfgs, err := NewRollupConfigs(cfg) - if err != nil { - return nil, fmt.Errorf("failed to load rollup configs: %w", err) - } - outputProvider := NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, rootProvider, l1Head, splitDepth, prestateTimestamp, poststateTimestamp) - asteriscCreator := func(ctx context.Context, localContext common.Hash, depth types.Depth, claimInfo ClaimInfo) (types.TraceProvider, error) { - logger := logger.New("agreedPrestate", hexutil.Bytes(claimInfo.AgreedPrestate), "claim", claimInfo.Claim, "localContext", localContext) - subdir := filepath.Join(dir, localContext.Hex()) - localInputs := utils.LocalGameInputs{ - L1Head: 
l1Head.Hash, - AgreedPreState: claimInfo.AgreedPrestate, - L2Claim: claimInfo.Claim, - L2SequenceNumber: new(big.Int).SetUint64(poststateTimestamp), - } - provider := asterisc.NewTraceProvider(logger, m.ToTypedVmMetrics(cfg.VmType.String()), cfg, serverExecutor, prestateProvider, asteriscPrestate, localInputs, subdir, depth) - return provider, nil - } - - cache := NewProviderCache(m, "super_asterisc_kona_provider", asteriscCreator) - selector := split.NewSplitProviderSelector(outputProvider, splitDepth, SuperRootSplitAdapter(outputProvider, cache.GetOrCreate)) - return trace.NewAccessor(selector), nil -} diff --git a/op-challenger/game/fault/trace/super/super_cannon.go b/op-challenger/game/fault/trace/super/super_cannon.go index 7c32773cb26a0..9fcc9d634cc90 100644 --- a/op-challenger/game/fault/trace/super/super_cannon.go +++ b/op-challenger/game/fault/trace/super/super_cannon.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-challenger/metrics" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" @@ -25,7 +26,8 @@ func NewSuperCannonTraceAccessor( cfg vm.Config, serverExecutor vm.OracleServerExecutor, prestateProvider PreimagePrestateProvider, - rootProvider RootProvider, + rootProvider *sources.SupervisorClient, + superNodeProvider *sources.SuperNodeClient, cannonPrestate string, dir string, l1Head eth.BlockID, @@ -37,7 +39,7 @@ func NewSuperCannonTraceAccessor( if err != nil { return nil, fmt.Errorf("failed to load rollup configs: %w", err) } - outputProvider := NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, rootProvider, l1Head, splitDepth, prestateTimestamp, poststateTimestamp) + outputProvider := NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, 
rootProvider, superNodeProvider, l1Head, splitDepth, prestateTimestamp, poststateTimestamp) cannonCreator := func(ctx context.Context, localContext common.Hash, depth types.Depth, claimInfo ClaimInfo) (types.TraceProvider, error) { logger := logger.New("agreedPrestate", hexutil.Bytes(claimInfo.AgreedPrestate), "claim", claimInfo.Claim, "localContext", localContext) subdir := filepath.Join(dir, localContext.Hex()) diff --git a/op-challenger/game/fault/trace/utils/local.go b/op-challenger/game/fault/trace/utils/local.go index 85f64e4021121..86dc3ac4b4646 100644 --- a/op-challenger/game/fault/trace/utils/local.go +++ b/op-challenger/game/fault/trace/utils/local.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/big" + "time" "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" @@ -50,7 +51,9 @@ func FetchLocalInputs(ctx context.Context, caller GameInputsSource, l2Client L2H } func FetchLocalInputsFromProposals(ctx context.Context, l1Head common.Hash, l2Client L2HeaderSource, agreedOutput Proposal, claimedOutput Proposal) (LocalGameInputs, error) { - agreedHeader, err := l2Client.HeaderByNumber(ctx, agreedOutput.L2BlockNumber) + tCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + agreedHeader, err := l2Client.HeaderByNumber(tCtx, agreedOutput.L2BlockNumber) if err != nil { return LocalGameInputs{}, fmt.Errorf("fetch L2 block header %v: %w", agreedOutput.L2BlockNumber, err) } diff --git a/op-challenger/game/fault/trace/vm/executor_test.go b/op-challenger/game/fault/trace/vm/executor_test.go index d6bdf48feddd6..d401b2d5b9947 100644 --- a/op-challenger/game/fault/trace/vm/executor_test.go +++ b/op-challenger/game/fault/trace/vm/executor_test.go @@ -62,7 +62,7 @@ func TestGenerateProof(t *testing.T) { cfg.DebugInfo = true _, _, args := captureExec(t, dir, cfg, inputs, info, math.MaxUint64, m) // stop-at would need to be one more than the proof step which would overflow back to 0 - // so expect that it will 
be omitted. We'll ultimately want asterisc to execute until the program exits. + // so expect that it will be omitted. We'll ultimately want the vm to execute until the program exits. require.NotContains(t, args, "--stop-at") validateMetrics(t, m, info, cfg) }) diff --git a/op-challenger/game/generic/player.go b/op-challenger/game/generic/player.go index 5545a51b7095f..99643de23c7bb 100644 --- a/op-challenger/game/generic/player.go +++ b/op-challenger/game/generic/player.go @@ -5,11 +5,9 @@ import ( "errors" "fmt" - "github.com/ethereum-optimism/optimism/op-challenger/game/client" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" - gethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) @@ -27,14 +25,8 @@ type GenericGameLoader interface { GetStatus(context.Context) (gameTypes.GameStatus, error) } -type SyncValidator interface { - // ValidateNodeSynced checks that the local node is sufficiently up to date to play the game. - // It returns client.ErrNotInSync if the node is too far behind. 
- ValidateNodeSynced(ctx context.Context, gameL1Head eth.BlockID) error -} - type L1HeaderSource interface { - HeaderByHash(context.Context, common.Hash) (*gethTypes.Header, error) + BlockRefByHash(ctx context.Context, hash common.Hash) (eth.BlockRef, error) } type ActorCreator func(ctx context.Context, logger log.Logger, l1Head eth.BlockID) (Actor, error) @@ -43,7 +35,7 @@ type GamePlayer struct { actor Actor loader GenericGameLoader logger log.Logger - syncValidator SyncValidator + syncValidator gameTypes.SyncValidator prestateValidators []PrestateValidator status gameTypes.GameStatus gameL1Head eth.BlockID @@ -59,7 +51,7 @@ func NewGenericGamePlayer( logger log.Logger, addr common.Address, loader GenericGameLoader, - syncValidator SyncValidator, + syncValidator gameTypes.SyncValidator, validators []PrestateValidator, l1HeaderSource L1HeaderSource, createActor ActorCreator, @@ -86,11 +78,11 @@ func NewGenericGamePlayer( if err != nil { return nil, fmt.Errorf("failed to load game L1 head: %w", err) } - l1Header, err := l1HeaderSource.HeaderByHash(ctx, l1HeadHash) + l1Header, err := l1HeaderSource.BlockRefByHash(ctx, l1HeadHash) if err != nil { return nil, fmt.Errorf("failed to load L1 header %v: %w", l1HeadHash, err) } - l1Head := eth.HeaderBlockID(l1Header) + l1Head := l1Header.ID() actor, err := createActor(ctx, logger, l1Head) if err != nil { @@ -127,7 +119,7 @@ func (g *GamePlayer) ProgressGame(ctx context.Context) gameTypes.GameStatus { g.logger.Trace("Skipping completed game") return g.status } - if err := g.syncValidator.ValidateNodeSynced(ctx, g.gameL1Head); errors.Is(err, client.ErrNotInSync) { + if err := g.syncValidator.ValidateNodeSynced(ctx, g.gameL1Head); errors.Is(err, gameTypes.ErrNotInSync) { g.logger.Warn("Local node not sufficiently up to date", "err", err) return g.status } else if err != nil { diff --git a/op-challenger/game/keccak/fetcher/fetcher.go b/op-challenger/game/keccak/fetcher/fetcher.go index 589bca09d9f1e..fa1443acd2996 100644 --- 
a/op-challenger/game/keccak/fetcher/fetcher.go +++ b/op-challenger/game/keccak/fetcher/fetcher.go @@ -8,6 +8,8 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" keccakTypes "github.com/ethereum-optimism/optimism/op-challenger/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -19,9 +21,8 @@ var ( ) type L1Source interface { - BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) - TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) - ChainID(ctx context.Context) (*big.Int, error) + BlockRefByNumber(ctx context.Context, num uint64) (eth.BlockRef, error) + apis.ReceiptsFetcher } type Oracle interface { @@ -43,12 +44,16 @@ func (f *InputFetcher) FetchInputs(ctx context.Context, blockHash common.Hash, o var inputs []keccakTypes.InputData for _, blockNum := range blockNums { foundRelevantTx := false - block, err := f.source.BlockByNumber(ctx, new(big.Int).SetUint64(blockNum)) + blockRef, err := f.source.BlockRefByNumber(ctx, blockNum) if err != nil { - return nil, fmt.Errorf("failed getting tx for block %v: %w", blockNum, err) + return nil, fmt.Errorf("failed getting info for block %v: %w", blockNum, err) } - for _, tx := range block.Transactions() { - inputData, err := f.extractRelevantLeavesFromTx(ctx, oracle, tx, ident) + _, receipts, err := f.source.FetchReceipts(ctx, blockRef.Hash) + if err != nil { + return nil, fmt.Errorf("failed to retrieve receipts for block %v: %w", blockNum, err) + } + for _, rcpt := range receipts { + inputData, err := f.extractRelevantLeavesFromReceipt(rcpt, oracle, ident) if err != nil { return nil, err } @@ -67,13 +72,9 @@ func (f *InputFetcher) FetchInputs(ctx context.Context, blockHash common.Hash, o 
return inputs, nil } -func (f *InputFetcher) extractRelevantLeavesFromTx(ctx context.Context, oracle Oracle, tx *types.Transaction, ident keccakTypes.LargePreimageIdent) ([]keccakTypes.InputData, error) { - rcpt, err := f.source.TransactionReceipt(ctx, tx.Hash()) - if err != nil { - return nil, fmt.Errorf("failed to retrieve receipt for tx %v: %w", tx.Hash(), err) - } +func (f *InputFetcher) extractRelevantLeavesFromReceipt(rcpt *types.Receipt, oracle Oracle, ident keccakTypes.LargePreimageIdent) ([]keccakTypes.InputData, error) { if rcpt.Status != types.ReceiptStatusSuccessful { - f.log.Trace("Skipping transaction with failed receipt status", "tx", tx.Hash(), "status", rcpt.Status) + f.log.Trace("Skipping transaction with failed receipt status", "tx", rcpt.TxHash, "status", rcpt.Status) return nil, nil } @@ -81,29 +82,29 @@ func (f *InputFetcher) extractRelevantLeavesFromTx(ctx context.Context, oracle O var inputs []keccakTypes.InputData for i, txLog := range rcpt.Logs { if txLog.Address != oracle.Addr() { - f.log.Trace("Skip tx log not emitted by the oracle contract", "tx", tx.Hash(), "logIndex", i, "targetContract", oracle.Addr(), "actualContract", txLog.Address) + f.log.Trace("Skip tx log not emitted by the oracle contract", "tx", rcpt.TxHash, "logIndex", i, "targetContract", oracle.Addr(), "actualContract", txLog.Address) continue } if len(txLog.Data) < 20 { - f.log.Trace("Skip tx log with insufficient data (less than 20 bytes)", "tx", tx.Hash(), "logIndex", i, "dataLength", len(txLog.Data)) + f.log.Trace("Skip tx log with insufficient data (less than 20 bytes)", "tx", rcpt.TxHash, "logIndex", i, "dataLength", len(txLog.Data)) continue } caller := common.Address(txLog.Data[0:20]) callData := txLog.Data[20:] if caller != ident.Claimant { - f.log.Trace("Skip tx log from irrelevant claimant", "tx", tx.Hash(), "logIndex", i, "targetClaimant", ident.Claimant, "actualClaimant", caller) + f.log.Trace("Skip tx log from irrelevant claimant", "tx", rcpt.TxHash, 
"logIndex", i, "targetClaimant", ident.Claimant, "actualClaimant", caller) continue } uuid, inputData, err := oracle.DecodeInputData(callData) if errors.Is(err, contracts.ErrInvalidAddLeavesCall) { - f.log.Trace("Skip tx log with call data not targeting expected method", "tx", tx.Hash(), "logIndex", i, "err", err) + f.log.Trace("Skip tx log with call data not targeting expected method", "tx", rcpt.TxHash, "logIndex", i, "err", err) continue } else if err != nil { return nil, err } if uuid.Cmp(ident.UUID) != 0 { - f.log.Trace("Skip tx log with irrelevant UUID", "tx", tx.Hash(), "logIndex", i, "targetUUID", ident.UUID, "actualUUID", uuid) + f.log.Trace("Skip tx log with irrelevant UUID", "tx", rcpt.TxHash, "logIndex", i, "targetUUID", ident.UUID, "actualUUID", uuid) continue } inputs = append(inputs, inputData) diff --git a/op-challenger/game/keccak/fetcher/fetcher_test.go b/op-challenger/game/keccak/fetcher/fetcher_test.go index 2a976b3e6df02..4083935a305bc 100644 --- a/op-challenger/game/keccak/fetcher/fetcher_test.go +++ b/op-challenger/game/keccak/fetcher/fetcher_test.go @@ -2,15 +2,15 @@ package fetcher import ( "context" - "crypto/ecdsa" + "encoding/binary" "errors" "fmt" - "math" "math/big" "testing" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" keccakTypes "github.com/ethereum-optimism/optimism/op-challenger/game/keccak/types" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/common" @@ -20,21 +20,14 @@ import ( "github.com/stretchr/testify/require" ) -const ( - // Signal to indicate a receipt should be considered missing - MissingReceiptStatus = math.MaxUint64 -) - var ( oracleAddr = common.Address{0x99, 0x98} otherAddr = common.Address{0x12, 0x34} claimantKey, _ = crypto.GenerateKey() - otherKey, _ = crypto.GenerateKey() ident = 
keccakTypes.LargePreimageIdent{ Claimant: crypto.PubkeyToAddress(claimantKey.PublicKey), UUID: big.NewInt(888), } - chainID = big.NewInt(123) blockHash = common.Hash{0xdd} input1 = keccakTypes.InputData{ Input: []byte{0xbb, 0x11}, @@ -80,36 +73,21 @@ func TestFetchLeaves_ErrorOnUnavailableL1Block(t *testing.T) { // No txs means stubL1Source will return an error when we try to fetch the block leaves, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) - require.ErrorContains(t, err, fmt.Sprintf("failed getting tx for block %v", blockNum)) + require.ErrorContains(t, err, fmt.Sprintf("failed getting info for block %v", blockNum)) require.Empty(t, leaves) } func TestFetchLeaves_SingleTxSingleLog(t *testing.T) { - cases := []struct { - name string - txSender *ecdsa.PrivateKey - txModifier TxModifier - }{ - {"from EOA claimant address", claimantKey, ValidTx}, - {"from contract call", otherKey, WithToAddr(otherAddr)}, - {"from contract creation", otherKey, WithoutToAddr()}, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - fetcher, oracle, l1Source := setupFetcherTest(t) - blockNum := uint64(7) - oracle.leafBlocks = []uint64{blockNum} + fetcher, oracle, l1Source := setupFetcherTest(t) + blockNum := uint64(7) + oracle.leafBlocks = []uint64{blockNum} - proposal := oracle.createProposal(input1) - tx := l1Source.createTx(blockNum, tc.txSender, tc.txModifier) - l1Source.createLog(tx, proposal) + proposal := oracle.createProposal(input1) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal) - inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) - require.NoError(t, err) - require.Equal(t, []keccakTypes.InputData{input1}, inputs) - }) - } + inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) + require.NoError(t, err) + require.Equal(t, []keccakTypes.InputData{input1}, inputs) } func TestFetchLeaves_SingleTxMultipleLogs(t *testing.T) { @@ -119,9 +97,7 
@@ func TestFetchLeaves_SingleTxMultipleLogs(t *testing.T) { proposal1 := oracle.createProposal(input1) proposal2 := oracle.createProposal(input2) - tx := l1Source.createTx(blockNum, otherKey, WithToAddr(otherAddr)) - l1Source.createLog(tx, proposal1) - l1Source.createLog(tx, proposal2) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal1, proposal2) inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) require.NoError(t, err) @@ -138,14 +114,10 @@ func TestFetchLeaves_MultipleBlocksAndLeaves(t *testing.T) { proposal2 := oracle.createProposal(input2) proposal3 := oracle.createProposal(input3) proposal4 := oracle.createProposal(input4) - block1Tx := l1Source.createTx(block1, claimantKey, ValidTx) - block2TxA := l1Source.createTx(block2, claimantKey, ValidTx) - l1Source.createTx(block2, claimantKey, ValidTx) // Add tx with no logs - block2TxB := l1Source.createTx(block2, otherKey, WithoutToAddr()) - l1Source.createLog(block1Tx, proposal1) - l1Source.createLog(block2TxA, proposal2) - l1Source.createLog(block2TxB, proposal3) - l1Source.createLog(block2TxB, proposal4) + l1Source.createReceipt(block1, types.ReceiptStatusSuccessful, proposal1) + l1Source.createReceipt(block1, types.ReceiptStatusSuccessful, proposal2) + l1Source.createReceipt(block2, types.ReceiptStatusSuccessful) // Add tx with no logs + l1Source.createReceipt(block2, types.ReceiptStatusSuccessful, proposal3, proposal4) inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) require.NoError(t, err) @@ -159,13 +131,12 @@ func TestFetchLeaves_SkipLogFromWrongContract(t *testing.T) { // Emit log from an irrelevant contract address proposal1 := oracle.createProposal(input2) - tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) - log1 := l1Source.createLog(tx1, proposal1) - log1.Address = otherAddr + rcpt := l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal1) + rcpt.Logs[0].Address = otherAddr + // Valid 
tx proposal2 := oracle.createProposal(input1) - tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx2, proposal2) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal2) inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) require.NoError(t, err) @@ -180,12 +151,11 @@ func TestFetchLeaves_SkipProposalWithWrongUUID(t *testing.T) { // Valid tx but with a different UUID proposal1 := oracle.createProposal(input2) proposal1.uuid = big.NewInt(874927294) - tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx1, proposal1) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal1) + // Valid tx proposal2 := oracle.createProposal(input1) - tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx2, proposal2) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal2) inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) require.NoError(t, err) @@ -200,12 +170,10 @@ func TestFetchLeaves_SkipProposalWithWrongClaimant(t *testing.T) { // Valid tx but with a different claimant proposal1 := oracle.createProposal(input2) proposal1.claimantAddr = otherAddr - tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx1, proposal1) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal1) // Valid tx proposal2 := oracle.createProposal(input1) - tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx2, proposal2) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal2) inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) require.NoError(t, err) @@ -220,12 +188,10 @@ func TestFetchLeaves_SkipInvalidProposal(t *testing.T) { // Set up proposal decoding to fail proposal1 := oracle.createProposal(input2) proposal1.valid = false - tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) 
- l1Source.createLog(tx1, proposal1) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal1) // Valid tx proposal2 := oracle.createProposal(input1) - tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx2, proposal2) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal2) inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) require.NoError(t, err) @@ -240,13 +206,11 @@ func TestFetchLeaves_SkipProposalWithInsufficientData(t *testing.T) { // Log contains insufficient data // It should hold a 20 byte address followed by the proposal payload proposal1 := oracle.createProposal(input2) - tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) - log1 := l1Source.createLog(tx1, proposal1) - log1.Data = proposal1.claimantAddr[:19] + rcpt1 := l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal1) + rcpt1.Logs[0].Data = proposal1.claimantAddr[:19] // Valid tx proposal2 := oracle.createProposal(input1) - tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx2, proposal2) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal2) inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) require.NoError(t, err) @@ -260,13 +224,11 @@ func TestFetchLeaves_SkipProposalMissingCallData(t *testing.T) { // Truncate call data from log so that is only contains an address proposal1 := oracle.createProposal(input2) - tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) - log1 := l1Source.createLog(tx1, proposal1) - log1.Data = log1.Data[0:20] + rcpt1 := l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal1) + rcpt1.Logs[0].Data = rcpt1.Logs[0].Data[0:20] // Valid tx proposal2 := oracle.createProposal(input1) - tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx2, proposal2) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal2) 
inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) require.NoError(t, err) @@ -280,36 +242,26 @@ func TestFetchLeaves_SkipTxWithReceiptStatusFail(t *testing.T) { // Valid proposal, but tx reverted proposal1 := oracle.createProposal(input2) - tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx1, proposal1) - l1Source.rcptStatus[tx1.Hash()] = types.ReceiptStatusFailed + l1Source.createReceipt(blockNum, types.ReceiptStatusFailed, proposal1) // Valid tx proposal2 := oracle.createProposal(input1) - tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx2, proposal2) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful, proposal2) inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) require.NoError(t, err) require.Equal(t, []keccakTypes.InputData{input1}, inputs) } -func TestFetchLeaves_ErrorsOnMissingReceipt(t *testing.T) { +func TestFetchLeaves_ErrorsOnMissingReceipts(t *testing.T) { fetcher, oracle, l1Source := setupFetcherTest(t) blockNum := uint64(7) oracle.leafBlocks = []uint64{blockNum} - // Valid tx - proposal1 := oracle.createProposal(input1) - tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx1, proposal1) - // Valid proposal, but tx receipt is missing - proposal2 := oracle.createProposal(input2) - tx2 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx2, proposal2) - l1Source.rcptStatus[tx2.Hash()] = MissingReceiptStatus + // Block exists but receipts return not found + l1Source.blocks[blockNum] = uint64ToHash(blockNum) input, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) - require.ErrorContains(t, err, fmt.Sprintf("failed to retrieve receipt for tx %v", tx2.Hash())) + require.ErrorContains(t, err, fmt.Sprintf("failed to retrieve receipts for block %v", blockNum)) require.Nil(t, input) } @@ -320,11 +272,9 @@ func 
TestFetchLeaves_ErrorsWhenNoValidLeavesInBlock(t *testing.T) { // Irrelevant tx - reverted proposal1 := oracle.createProposal(input2) - tx1 := l1Source.createTx(blockNum, claimantKey, ValidTx) - l1Source.createLog(tx1, proposal1) - l1Source.rcptStatus[tx1.Hash()] = types.ReceiptStatusFailed + l1Source.createReceipt(blockNum, types.ReceiptStatusFailed, proposal1) // Irrelevant tx - no logs are emitted - l1Source.createTx(blockNum, claimantKey, ValidTx) + l1Source.createReceipt(blockNum, types.ReceiptStatusSuccessful) inputs, err := fetcher.FetchInputs(context.Background(), blockHash, oracle, ident) require.ErrorIs(t, err, ErrNoLeavesFound) @@ -336,6 +286,8 @@ func setupFetcherTest(t *testing.T) (*InputFetcher, *stubOracle, *stubL1Source) proposals: make(map[byte]*proposalConfig), } l1Source := &stubL1Source{ + blocks: make(map[uint64]common.Hash), + rcpts: make(map[common.Hash]types.Receipts), txs: make(map[uint64]types.Transactions), rcptStatus: make(map[common.Hash]uint64), logs: make(map[common.Hash][]*types.Log), @@ -384,24 +336,6 @@ func (o *stubOracle) DecodeInputData(data []byte) (*big.Int, keccakTypes.InputDa return proposal.uuid, proposal.inputData, nil } -type TxModifier func(tx *types.DynamicFeeTx) - -var ValidTx TxModifier = func(_ *types.DynamicFeeTx) { - // no-op -} - -func WithToAddr(addr common.Address) TxModifier { - return func(tx *types.DynamicFeeTx) { - tx.To = &addr - } -} - -func WithoutToAddr() TxModifier { - return func(tx *types.DynamicFeeTx) { - tx.To = nil - } -} - func (o *stubOracle) createProposal(input keccakTypes.InputData) *proposalConfig { id := o.nextProposalId o.nextProposalId++ @@ -420,6 +354,11 @@ func (o *stubOracle) createProposal(input keccakTypes.InputData) *proposalConfig type stubL1Source struct { nextTxId uint64 + + // Map block number to block hash + blocks map[uint64]common.Hash + // Map block hash to receipts + rcpts map[common.Hash]types.Receipts // Map block number to tx txs map[uint64]types.Transactions // Map 
txHash to receipt @@ -428,78 +367,62 @@ type stubL1Source struct { logs map[common.Hash][]*types.Log } -func (s *stubL1Source) ChainID(_ context.Context) (*big.Int, error) { - return chainID, nil -} - -func (s *stubL1Source) BlockByNumber(_ context.Context, number *big.Int) (*types.Block, error) { - txs, ok := s.txs[number.Uint64()] +func (s *stubL1Source) BlockRefByNumber(_ context.Context, num uint64) (eth.BlockRef, error) { + hash, ok := s.blocks[num] if !ok { - return nil, errors.New("not found") + return eth.BlockRef{}, errors.New("not found") } - return (&types.Block{}).WithBody(types.Body{Transactions: txs}), nil + return eth.BlockRef{ + Number: num, + Hash: hash, + }, nil } -func (s *stubL1Source) TransactionReceipt(_ context.Context, txHash common.Hash) (*types.Receipt, error) { - rcptStatus, ok := s.rcptStatus[txHash] +func (s *stubL1Source) FetchReceipts(_ context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error) { + rcpts, ok := s.rcpts[blockHash] if !ok { - rcptStatus = types.ReceiptStatusSuccessful - } else if rcptStatus == MissingReceiptStatus { - return nil, errors.New("not found") + return nil, nil, errors.New("not found") } + return nil, rcpts, nil +} - logs := s.logs[txHash] - return &types.Receipt{Status: rcptStatus, Logs: logs}, nil +func uint64ToHash(num uint64) common.Hash { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return crypto.Keccak256Hash(data) } -func (s *stubL1Source) createTx(blockNum uint64, key *ecdsa.PrivateKey, txMod TxModifier) *types.Transaction { +func (s *stubL1Source) createReceipt(blockNum uint64, status uint64, proposals ...*proposalConfig) *types.Receipt { + // Make the block exist + s.blocks[blockNum] = uint64ToHash(blockNum) + txId := s.nextTxId s.nextTxId++ - inner := &types.DynamicFeeTx{ - ChainID: chainID, - Nonce: txId, - To: &oracleAddr, - Value: big.NewInt(0), - GasTipCap: big.NewInt(1), - GasFeeCap: big.NewInt(2), - Gas: 3, - Data: []byte{}, + logs := 
make([]*types.Log, len(proposals)) + for i, proposal := range proposals { + // Concat the claimant address and the proposal id + // These will be split back into address and id in fetcher.extractRelevantLeavesFromTx + data := append(proposal.claimantAddr[:], proposal.id) + + txLog := &types.Log{ + Address: oracleAddr, + Data: data, + Topics: []common.Hash{}, + + // ignored (zeroed): + BlockNumber: 0, + TxHash: common.Hash{}, + TxIndex: 0, + BlockHash: common.Hash{}, + Index: 0, + Removed: false, + } + logs[i] = txLog } - txMod(inner) - tx := types.MustSignNewTx(key, types.LatestSignerForChainID(inner.ChainID), inner) - - // Track tx internally - txSet := s.txs[blockNum] - txSet = append(txSet, tx) - s.txs[blockNum] = txSet - - return tx -} - -func (s *stubL1Source) createLog(tx *types.Transaction, proposal *proposalConfig) *types.Log { - // Concat the claimant address and the proposal id - // These will be split back into address and id in fetcher.extractRelevantLeavesFromTx - data := append(proposal.claimantAddr[:], proposal.id) - - txLog := &types.Log{ - Address: oracleAddr, - Data: data, - Topics: []common.Hash{}, - - // ignored (zeroed): - BlockNumber: 0, - TxHash: common.Hash{}, - TxIndex: 0, - BlockHash: common.Hash{}, - Index: 0, - Removed: false, - } - - // Track tx log - logSet := s.logs[tx.Hash()] - logSet = append(logSet, txLog) - s.logs[tx.Hash()] = logSet - - return txLog + rcpt := &types.Receipt{TxHash: uint64ToHash(txId), Status: status, Logs: logs} + blockHash := s.blocks[blockNum] + rcpts := s.rcpts[blockHash] + s.rcpts[blockHash] = append(rcpts, rcpt) + return rcpt } diff --git a/op-challenger/game/service.go b/op-challenger/game/service.go index 8188667f5ed47..32635d087b9d0 100644 --- a/op-challenger/game/service.go +++ b/op-challenger/game/service.go @@ -6,12 +6,14 @@ import ( "fmt" "io" "sync/atomic" + "time" challengerClient "github.com/ethereum-optimism/optimism/op-challenger/game/client" 
"github.com/ethereum-optimism/optimism/op-challenger/game/keccak" "github.com/ethereum-optimism/optimism/op-challenger/game/keccak/fetcher" "github.com/ethereum-optimism/optimism/op-challenger/game/zk" "github.com/ethereum-optimism/optimism/op-challenger/sender" + "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" @@ -57,8 +59,9 @@ type Service struct { registry *registry.GameTypeRegistry oracles *registry.OracleRegistry - l1Client *ethclient.Client - pollClient client.RPC + l1RPC client.RPC + l1Client *sources.L1Client + l1EthClient *ethclient.Client pprofService *oppprof.Service metricsSrv *httputil.HTTPServer @@ -90,12 +93,9 @@ func (s *Service) initFromConfig(ctx context.Context, cfg *config.Config) error return fmt.Errorf("failed to init tx manager: %w", err) } s.initClaimants(cfg) - if err := s.initL1Client(ctx, cfg); err != nil { + if err := s.initL1Clients(ctx, cfg); err != nil { return fmt.Errorf("failed to init l1 client: %w", err) } - if err := s.initPollClient(ctx, cfg); err != nil { - return fmt.Errorf("failed to init poll client: %w", err) - } if err := s.initPProf(&cfg.PprofConfig); err != nil { return fmt.Errorf("failed to init profiling: %w", err) } @@ -140,21 +140,28 @@ func (s *Service) initTxManager(ctx context.Context, cfg *config.Config) error { return nil } -func (s *Service) initL1Client(ctx context.Context, cfg *config.Config) error { - l1Client, err := dial.DialEthClientWithTimeout(ctx, dial.DefaultDialTimeout, s.logger, cfg.L1EthRpc) +func (s *Service) initL1Clients(ctx context.Context, cfg *config.Config) error { + l1EthClient, err := dial.DialEthClientWithTimeout(ctx, dial.DefaultDialTimeout, s.logger, cfg.L1EthRpc) if err != nil { return fmt.Errorf("failed to dial L1: %w", err) } - s.l1Client = l1Client - return nil -} -func (s *Service) initPollClient(ctx context.Context, cfg *config.Config) error { - 
pollClient, err := client.NewRPCWithClient(ctx, s.logger, cfg.L1EthRpc, client.NewBaseRPCClient(s.l1Client.Client()), cfg.PollInterval) + l1RPC := client.NewBaseRPCClient(l1EthClient.Client(), client.WithCallTimeout(30*time.Second), client.WithBatchCallTimeout(60*time.Second)) + pollClient, err := client.NewRPCWithClient(ctx, s.logger, cfg.L1EthRpc, l1RPC, cfg.PollInterval) if err != nil { return fmt.Errorf("failed to create RPC client: %w", err) } - s.pollClient = pollClient + s.l1RPC = pollClient + + l1Client, err := sources.NewL1Client(s.l1RPC, s.logger, s.metrics, sources.L1ClientSimpleConfig(true, cfg.L1RPCKind, 100)) + if err != nil { + return fmt.Errorf("failed to dial L1: %w", err) + } + + s.l1Client = l1Client + s.l1RPC = l1Client.RPC() + + s.l1EthClient = l1EthClient return nil } @@ -190,13 +197,13 @@ func (s *Service) initMetricsServer(cfg *opmetrics.CLIConfig) error { } s.logger.Info("started metrics server", "addr", metricsSrv.Addr()) s.metricsSrv = metricsSrv - s.balanceMetricer = s.metrics.StartBalanceMetrics(s.logger, s.l1Client, s.txSender.From()) + s.balanceMetricer = s.metrics.StartBalanceMetrics(s.logger, s.l1EthClient, s.txSender.From()) return nil } func (s *Service) initFactoryContract(ctx context.Context, cfg *config.Config) error { factoryContract, err := contracts.NewDisputeGameFactoryContract(ctx, s.metrics, cfg.GameFactoryAddress, - batching.NewMultiCaller(s.l1Client.Client(), batching.DefaultBatchSize)) + batching.NewMultiCaller(s.l1RPC, batching.DefaultBatchSize)) if err != nil { return fmt.Errorf("failed to create factory contract: %w", err) } @@ -213,7 +220,7 @@ func (s *Service) initBondClaims() error { func (s *Service) registerGameTypes(ctx context.Context, cfg *config.Config) error { gameTypeRegistry := registry.NewGameTypeRegistry() oracles := registry.NewOracleRegistry() - s.clientProvider = challengerClient.NewProvider(ctx, s.logger, cfg, s.l1Client) + s.clientProvider = challengerClient.NewProvider(ctx, s.logger, cfg, 
s.l1Client, s.l1RPC) err := fault.RegisterGameTypes(ctx, s.systemClock, s.l1Clock, s.logger, s.metrics, cfg, gameTypeRegistry, oracles, s.txSender, s.factoryContract, s.clientProvider, cfg.SelectiveClaimResolution, s.claimants) if err != nil { return err @@ -242,7 +249,7 @@ func (s *Service) initLargePreimages() error { } func (s *Service) initMonitor(cfg *config.Config) { - s.monitor = newGameMonitor(s.logger, s.l1Clock, s.factoryContract, s.sched, s.preimages, cfg.GameWindow, s.claimer, cfg.GameAllowlist, s.pollClient, cfg.MinUpdateInterval) + s.monitor = newGameMonitor(s.logger, s.l1Clock, s.factoryContract, s.sched, s.preimages, cfg.GameWindow, s.claimer, cfg.GameAllowlist, s.l1RPC, cfg.MinUpdateInterval) } func (s *Service) Start(ctx context.Context) error { @@ -295,8 +302,8 @@ func (s *Service) Stop(ctx context.Context) error { s.txMgr.Close() } - if s.pollClient != nil { - s.pollClient.Close() + if s.l1RPC != nil { + s.l1RPC.Close() } if s.l1Client != nil { s.l1Client.Close() diff --git a/op-challenger/game/types/game_type.go b/op-challenger/game/types/game_type.go index 9e9943d05750f..55a50df9b2022 100644 --- a/op-challenger/game/types/game_type.go +++ b/op-challenger/game/types/game_type.go @@ -13,12 +13,12 @@ type GameType uint32 const ( CannonGameType GameType = 0 PermissionedGameType GameType = 1 - AsteriscGameType GameType = 2 - AsteriscKonaGameType GameType = 3 + AsteriscGameType GameType = 2 // Not supported by op-challenger + AsteriscKonaGameType GameType = 3 // Not supported by op-challenger SuperCannonGameType GameType = 4 SuperPermissionedGameType GameType = 5 OPSuccinctGameType GameType = 6 // Not supported by op-challenger - SuperAsteriscKonaGameType GameType = 7 + SuperAsteriscKonaGameType GameType = 7 // Not supported by op-challenger CannonKonaGameType GameType = 8 SuperCannonKonaGameType GameType = 9 OptimisticZKGameType GameType = 10 @@ -35,13 +35,10 @@ var SupportedGameTypes = []GameType{ CannonGameType, CannonKonaGameType, 
PermissionedGameType, - AsteriscGameType, - AsteriscKonaGameType, FastGameType, SuperCannonGameType, SuperCannonKonaGameType, SuperPermissionedGameType, - SuperAsteriscKonaGameType, OptimisticZKGameType, } diff --git a/op-challenger/game/types/types.go b/op-challenger/game/types/types.go index 4579e5476c1e9..1df72da5dde43 100644 --- a/op-challenger/game/types/types.go +++ b/op-challenger/game/types/types.go @@ -1,13 +1,18 @@ package types import ( + "context" "errors" "fmt" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" ) -var ErrInvalidPrestate = errors.New("absolute prestate does not match") +var ( + ErrNotInSync = errors.New("local node too far behind") + ErrInvalidPrestate = errors.New("absolute prestate does not match") +) type GameStatus uint8 @@ -45,3 +50,9 @@ type GameMetadata struct { Timestamp uint64 Proxy common.Address } + +type SyncValidator interface { + // ValidateNodeSynced checks that the local node is sufficiently up to date to play the game. + // It returns client.ErrNotInSync if the node is too far behind. 
+ ValidateNodeSynced(ctx context.Context, gameL1Head eth.BlockID) error +} diff --git a/op-challenger/runner/factory.go b/op-challenger/runner/factory.go index 68e71d3427ca0..bf13351f1cc9b 100644 --- a/op-challenger/runner/factory.go +++ b/op-challenger/runner/factory.go @@ -6,7 +6,6 @@ import ( "net/url" "github.com/ethereum-optimism/optimism/op-challenger/config" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/asterisc" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" @@ -48,33 +47,6 @@ func createTraceProvider( } prestateProvider := vm.NewPrestateProvider(prestate, stateConverter) return cannon.NewTraceProvider(logger, m, cfg.CannonKona, serverExecutor, prestateProvider, prestate, localInputs, dir, 42), nil - case gameTypes.AsteriscGameType: - serverExecutor := vm.NewOpProgramServerExecutor(logger) - stateConverter := asterisc.NewStateConverter(cfg.Asterisc) - prestate, err := prestateSource.getPrestate(ctx, logger, cfg.AsteriscAbsolutePreStateBaseURL, cfg.AsteriscAbsolutePreState, dir, stateConverter) - if err != nil { - return nil, err - } - prestateProvider := vm.NewPrestateProvider(prestate, stateConverter) - return asterisc.NewTraceProvider(logger, m, cfg.Asterisc, serverExecutor, prestateProvider, prestate, localInputs, dir, 42), nil - case gameTypes.AsteriscKonaGameType: - serverExecutor := vm.NewKonaExecutor() - stateConverter := asterisc.NewStateConverter(cfg.AsteriscKona) - prestate, err := prestateSource.getPrestate(ctx, logger, cfg.AsteriscKonaAbsolutePreStateBaseURL, cfg.AsteriscKonaAbsolutePreState, dir, stateConverter) - if err != nil { - return nil, err - } - prestateProvider := vm.NewPrestateProvider(prestate, stateConverter) - return asterisc.NewTraceProvider(logger, m, cfg.AsteriscKona, serverExecutor, prestateProvider, prestate, localInputs, 
dir, 42), nil - case gameTypes.SuperAsteriscKonaGameType: - serverExecutor := vm.NewKonaSuperExecutor() - stateConverter := asterisc.NewStateConverter(cfg.AsteriscKona) - prestate, err := prestateSource.getPrestate(ctx, logger, cfg.AsteriscKonaAbsolutePreStateBaseURL, cfg.AsteriscKonaAbsolutePreState, dir, stateConverter) - if err != nil { - return nil, err - } - prestateProvider := vm.NewPrestateProvider(prestate, stateConverter) - return asterisc.NewTraceProvider(logger, m, cfg.AsteriscKona, serverExecutor, prestateProvider, prestate, localInputs, dir, 42), nil } return nil, errors.New("invalid game type") } diff --git a/op-challenger/runner/game_inputs.go b/op-challenger/runner/game_inputs.go index 2814ec5e48e68..bbd0c7039279d 100644 --- a/op-challenger/runner/game_inputs.go +++ b/op-challenger/runner/game_inputs.go @@ -18,7 +18,7 @@ import ( func createGameInputs(ctx context.Context, log log.Logger, rollupClient *sources.RollupClient, supervisorClient *sources.SupervisorClient, typeName string, gameType gameTypes.GameType) (utils.LocalGameInputs, error) { switch gameType { - case gameTypes.SuperCannonGameType, gameTypes.SuperPermissionedGameType, gameTypes.SuperAsteriscKonaGameType, gameTypes.SuperCannonKonaGameType: + case gameTypes.SuperCannonGameType, gameTypes.SuperPermissionedGameType, gameTypes.SuperCannonKonaGameType: if supervisorClient == nil { return utils.LocalGameInputs{}, fmt.Errorf("game type %s requires supervisor rpc to be set", gameType) } @@ -96,7 +96,7 @@ func createGameInputsInterop(ctx context.Context, log log.Logger, client *source prestateProvider := super.NewSuperRootPrestateProvider(client, agreedTimestamp) gameDepth := types.Depth(30) - provider := super.NewSuperTraceProvider(log, nil, prestateProvider, client, l1Head.ID(), gameDepth, agreedTimestamp, claimTimestamp+10) + provider := super.NewSupervisorSuperTraceProvider(log, nil, prestateProvider, client, l1Head.ID(), gameDepth, agreedTimestamp, claimTimestamp+10) var agreedPrestate 
[]byte var claim common.Hash switch rand.IntN(3) { diff --git a/op-challenger/runner/metrics.go b/op-challenger/runner/metrics.go index 8aa7494fa2c04..9b0fd98a56846 100644 --- a/op-challenger/runner/metrics.go +++ b/op-challenger/runner/metrics.go @@ -20,16 +20,22 @@ type Metrics struct { *metrics.VmMetrics opmetrics.RPCMetrics - up prometheus.Gauge - vmLastExecutionTime *prometheus.GaugeVec - vmLastMemoryUsed *prometheus.GaugeVec - successTotal *prometheus.CounterVec - failuresTotal *prometheus.CounterVec - consecutiveFailuresCurrent *prometheus.GaugeVec - panicsTotal *prometheus.CounterVec - invalidTotal *prometheus.CounterVec + up prometheus.Gauge + vmLastExecutionTime *prometheus.GaugeVec + vmLastMemoryUsed *prometheus.GaugeVec + successTotal *prometheus.CounterVec + setupFailuresTotal *prometheus.CounterVec + consecutiveSetupFailuresCurrent *prometheus.GaugeVec + vmFailuresTotal *prometheus.CounterVec } +// Reason labels for vmFailuresTotal metric +const ( + ReasonIncorrectStatus = "incorrect_status" + ReasonPanic = "panic" + ReasonTimeout = "timeout" +) + var _ Metricer = (*Metrics)(nil) // Metrics implementation must implement RegistryMetricer to allow the metrics server to work. 
@@ -68,34 +74,30 @@ func NewMetrics(runConfigs []RunConfig) *Metrics { Name: "success_total", Help: "Number of VM executions that successfully verified the output root", }, []string{"type"}), - failuresTotal: factory.NewCounterVec(prometheus.CounterOpts{ + setupFailuresTotal: factory.NewCounterVec(prometheus.CounterOpts{ Namespace: Namespace, - Name: "failures_total", - Help: "Number of failures to execute a VM", + Name: "setup_failures_total", + Help: "Number of setup failures before VM execution", }, []string{"type"}), - consecutiveFailuresCurrent: factory.NewGaugeVec(prometheus.GaugeOpts{ + consecutiveSetupFailuresCurrent: factory.NewGaugeVec(prometheus.GaugeOpts{ Namespace: Namespace, - Name: "consecutive_failures_current", + Name: "consecutive_setup_failures_current", Help: "Number of consecutive setup failures by VM type. Resets to 0 on any complete run.", }, []string{"type"}), - panicsTotal: factory.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "panics_total", - Help: "Number of times the VM panicked", - }, []string{"type"}), - invalidTotal: factory.NewCounterVec(prometheus.CounterOpts{ + vmFailuresTotal: factory.NewCounterVec(prometheus.CounterOpts{ Namespace: Namespace, - Name: "invalid_total", - Help: "Number of runs that determined the output root was invalid", - }, []string{"type"}), + Name: "vm_failures_total", + Help: "Number of VM execution failures by type and reason (incorrect_status, panic, timeout)", + }, []string{"type", "reason"}), } for _, runConfig := range runConfigs { metrics.successTotal.WithLabelValues(runConfig.Name).Add(0) - metrics.failuresTotal.WithLabelValues(runConfig.Name).Add(0) - metrics.consecutiveFailuresCurrent.WithLabelValues(runConfig.Name).Set(0) - metrics.panicsTotal.WithLabelValues(runConfig.Name).Add(0) - metrics.invalidTotal.WithLabelValues(runConfig.Name).Add(0) + metrics.setupFailuresTotal.WithLabelValues(runConfig.Name).Add(0) + 
metrics.consecutiveSetupFailuresCurrent.WithLabelValues(runConfig.Name).Set(0) + metrics.vmFailuresTotal.WithLabelValues(runConfig.Name, ReasonIncorrectStatus).Add(0) + metrics.vmFailuresTotal.WithLabelValues(runConfig.Name, ReasonPanic).Add(0) + metrics.vmFailuresTotal.WithLabelValues(runConfig.Name, ReasonTimeout).Add(0) metrics.RecordUp() } @@ -123,22 +125,15 @@ func (m *Metrics) RecordVmMemoryUsed(vmType string, memoryUsed uint64) { func (m *Metrics) RecordSuccess(vmType string) { m.successTotal.WithLabelValues(vmType).Inc() - m.consecutiveFailuresCurrent.WithLabelValues(vmType).Set(0) -} - -func (m *Metrics) RecordFailure(vmType string) { - m.failuresTotal.WithLabelValues(vmType).Inc() - m.consecutiveFailuresCurrent.WithLabelValues(vmType).Inc() + m.consecutiveSetupFailuresCurrent.WithLabelValues(vmType).Set(0) } -func (m *Metrics) RecordPanic(vmType string) { - m.panicsTotal.WithLabelValues(vmType).Inc() - // The result was bad, but we still completed setup successfully - m.consecutiveFailuresCurrent.WithLabelValues(vmType).Set(0) +func (m *Metrics) RecordSetupFailure(vmType string) { + m.setupFailuresTotal.WithLabelValues(vmType).Inc() + m.consecutiveSetupFailuresCurrent.WithLabelValues(vmType).Inc() } -func (m *Metrics) RecordInvalid(vmType string) { - m.invalidTotal.WithLabelValues(vmType).Inc() - // The result was bad, but we still completed setup successfully - m.consecutiveFailuresCurrent.WithLabelValues(vmType).Set(0) +func (m *Metrics) RecordVmFailure(vmType string, reason string) { + m.vmFailuresTotal.WithLabelValues(vmType, reason).Inc() + m.consecutiveSetupFailuresCurrent.WithLabelValues(vmType).Set(0) } diff --git a/op-challenger/runner/runner.go b/op-challenger/runner/runner.go index 796a9da1897a3..f3de6125d9d25 100644 --- a/op-challenger/runner/runner.go +++ b/op-challenger/runner/runner.go @@ -36,6 +36,7 @@ import ( var ( ErrUnexpectedStatusCode = errors.New("unexpected status code") + ErrVMTimeout = errors.New("VM execution timed out") ) type 
Metricer interface { @@ -43,9 +44,8 @@ type Metricer interface { metrics.VmMetricer opmetrics.RPCMetricer - RecordFailure(vmType string) - RecordPanic(vmType string) - RecordInvalid(vmType string) + RecordSetupFailure(vmType string) + RecordVmFailure(vmType string, reason string) RecordSuccess(vmType string) } @@ -56,11 +56,24 @@ type RunConfig struct { PrestateFilename string } +type TraceProviderCreator func( + ctx context.Context, + logger log.Logger, + m trace.Metricer, + cfg *config.Config, + prestateSource prestateFetcher, + gameType gameTypes.GameType, + localInputs utils.LocalGameInputs, + dir string, +) (types.TraceProvider, error) + type Runner struct { - log log.Logger - cfg *config.Config - runConfigs []RunConfig - m Metricer + log log.Logger + cfg *config.Config + runConfigs []RunConfig + m Metricer + vmTimeout time.Duration + traceProviderCreator TraceProviderCreator running atomic.Bool ctx context.Context @@ -69,12 +82,14 @@ type Runner struct { metricsSrv *httputil.HTTPServer } -func NewRunner(logger log.Logger, cfg *config.Config, runConfigs []RunConfig) *Runner { +func NewRunner(logger log.Logger, cfg *config.Config, runConfigs []RunConfig, vmTimeout time.Duration) *Runner { return &Runner{ - log: logger, - cfg: cfg, - runConfigs: runConfigs, - m: NewMetrics(runConfigs), + log: logger, + cfg: cfg, + runConfigs: runConfigs, + m: NewMetrics(runConfigs), + vmTimeout: vmTimeout, + traceProviderCreator: createTraceProvider, } } @@ -99,9 +114,9 @@ func (r *Runner) Start(ctx context.Context) error { rollupClient = cl } var supervisorClient *sources.SupervisorClient - if r.cfg.SupervisorRPC != "" { - r.log.Info("Dialling supervisor client", "url", r.cfg.SupervisorRPC) - cl, err := dial.DialSupervisorClientWithTimeout(ctx, r.log, r.cfg.SupervisorRPC) + if r.cfg.SuperRPC != "" { + r.log.Info("Dialling supervisor client", "url", r.cfg.SuperRPC) + cl, err := dial.DialSupervisorClientWithTimeout(ctx, r.log, r.cfg.SuperRPC) if err != nil { return 
fmt.Errorf("failed to dial supervisor: %w", err) } @@ -142,13 +157,16 @@ func (r *Runner) runAndRecordOnce(ctx context.Context, rlog log.Logger, runConfi recordError := func(err error, configName string, m Metricer, log log.Logger) { if errors.Is(err, ErrUnexpectedStatusCode) { log.Error("Incorrect status code", "type", runConfig.Name, "err", err) - m.RecordInvalid(configName) + m.RecordVmFailure(configName, ReasonIncorrectStatus) } else if errors.Is(err, trace.ErrVMPanic) { log.Error("VM panicked", "type", runConfig.Name) - m.RecordPanic(configName) + m.RecordVmFailure(configName, ReasonPanic) + } else if errors.Is(err, ErrVMTimeout) { + log.Error("VM execution timed out", "type", runConfig.Name, "timeout", r.vmTimeout) + m.RecordVmFailure(configName, ReasonTimeout) } else if err != nil { log.Error("Failed to run", "type", runConfig.Name, "err", err) - m.RecordFailure(configName) + m.RecordSetupFailure(configName) } else { log.Info("Successfully verified output root", "type", runConfig.Name) m.RecordSuccess(configName) @@ -195,12 +213,20 @@ func (r *Runner) runAndRecordOnce(ctx context.Context, rlog log.Logger, runConfi } func (r *Runner) runOnce(ctx context.Context, logger log.Logger, name string, gameType gameTypes.GameType, prestateSource prestateFetcher, localInputs utils.LocalGameInputs, dir string) error { - provider, err := createTraceProvider(ctx, logger, metrics.NewTypedVmMetrics(r.m, name), r.cfg, prestateSource, gameType, localInputs, dir) + if r.vmTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, r.vmTimeout) + defer cancel() + } + provider, err := r.traceProviderCreator(ctx, logger, metrics.NewTypedVmMetrics(r.m, name), r.cfg, prestateSource, gameType, localInputs, dir) if err != nil { return fmt.Errorf("failed to create trace provider: %w", err) } hash, err := provider.Get(ctx, types.RootPosition) if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("%w: %w", ErrVMTimeout, err) + } 
return fmt.Errorf("failed to execute trace provider: %w", err) } if hash[0] != mipsevm.VMStatusValid { diff --git a/op-challenger/runner/runner_test.go b/op-challenger/runner/runner_test.go new file mode 100644 index 0000000000000..2af0cea0b9065 --- /dev/null +++ b/op-challenger/runner/runner_test.go @@ -0,0 +1,138 @@ +package runner + +import ( + "context" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestNewRunnerSetsTimeout(t *testing.T) { + timeout := 5 * time.Minute + r := NewRunner(nil, nil, nil, timeout) + require.Equal(t, timeout, r.vmTimeout) +} + +func TestRunOnceAppliesTimeout(t *testing.T) { + tests := []struct { + name string + vmTimeout time.Duration + expectDeadline bool + }{ + { + name: "timeout applied when set", + vmTimeout: 100 * time.Millisecond, + expectDeadline: true, + }, + { + name: "no deadline when timeout is zero", + vmTimeout: 0, + expectDeadline: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var capturedCtx context.Context + + mockCreator := func( + ctx context.Context, + _ log.Logger, + _ vm.Metricer, + _ *config.Config, + _ prestateFetcher, + _ gameTypes.GameType, + _ utils.LocalGameInputs, + _ string, + ) (types.TraceProvider, error) { + capturedCtx = ctx + return &stubTraceProvider{}, nil + } + + r := &Runner{ + vmTimeout: tt.vmTimeout, + traceProviderCreator: mockCreator, + } + + err := r.runOnce(context.Background(), log.New(), "test", gameTypes.CannonGameType, 
nil, utils.LocalGameInputs{}, "") + require.NoError(t, err) + + _, hasDeadline := capturedCtx.Deadline() + require.Equal(t, tt.expectDeadline, hasDeadline) + }) + } +} + +type stubTraceProvider struct{} + +func (s *stubTraceProvider) Get(_ context.Context, _ types.Position) (common.Hash, error) { + // Return a hash with VMStatusValid as the first byte + var hash common.Hash + hash[0] = mipsevm.VMStatusValid + return hash, nil +} + +func (s *stubTraceProvider) GetStepData(_ context.Context, _ types.Position) ([]byte, []byte, *types.PreimageOracleData, error) { + return nil, nil, nil, nil +} + +func (s *stubTraceProvider) AbsolutePreStateCommitment(_ context.Context) (common.Hash, error) { + return common.Hash{}, nil +} + +func (s *stubTraceProvider) GetL2BlockNumberChallenge(_ context.Context) (*types.InvalidL2BlockNumberChallenge, error) { + return nil, types.ErrL2BlockNumberValid +} + +// slowTraceProvider blocks until context is done, simulating a slow VM +type slowTraceProvider struct{} + +func (s *slowTraceProvider) Get(ctx context.Context, _ types.Position) (common.Hash, error) { + <-ctx.Done() + return common.Hash{}, ctx.Err() +} + +func (s *slowTraceProvider) GetStepData(_ context.Context, _ types.Position) ([]byte, []byte, *types.PreimageOracleData, error) { + return nil, nil, nil, nil +} + +func (s *slowTraceProvider) AbsolutePreStateCommitment(_ context.Context) (common.Hash, error) { + return common.Hash{}, nil +} + +func (s *slowTraceProvider) GetL2BlockNumberChallenge(_ context.Context) (*types.InvalidL2BlockNumberChallenge, error) { + return nil, types.ErrL2BlockNumberValid +} + +func TestRunOnceReturnsTimeoutError(t *testing.T) { + mockCreator := func( + _ context.Context, + _ log.Logger, + _ vm.Metricer, + _ *config.Config, + _ prestateFetcher, + _ gameTypes.GameType, + _ utils.LocalGameInputs, + _ string, + ) (types.TraceProvider, error) { + return &slowTraceProvider{}, nil + } + + r := &Runner{ + vmTimeout: 50 * time.Millisecond, + 
traceProviderCreator: mockCreator, + } + + err := r.runOnce(context.Background(), log.New(), "test", gameTypes.CannonGameType, nil, utils.LocalGameInputs{}, "") + require.ErrorIs(t, err, ErrVMTimeout) + require.ErrorIs(t, err, context.DeadlineExceeded) +} diff --git a/op-challenger/sender/sender_test.go b/op-challenger/sender/sender_test.go index 54d12d45f4780..ea5f0986d39b1 100644 --- a/op-challenger/sender/sender_test.go +++ b/op-challenger/sender/sender_test.go @@ -194,6 +194,6 @@ func (s *stubTxMgr) API() rpc.API { func (s *stubTxMgr) Close() { } -func (s *stubTxMgr) SuggestGasPriceCaps(context.Context) (*big.Int, *big.Int, *big.Int, error) { +func (s *stubTxMgr) SuggestGasPriceCaps(context.Context) (*big.Int, *big.Int, *big.Int, *big.Int, error) { panic("unimplemented") } diff --git a/op-deployer/pkg/deployer/abi_types.go b/op-deployer/pkg/deployer/abi_types.go new file mode 100644 index 0000000000000..5f6502d72e45f --- /dev/null +++ b/op-deployer/pkg/deployer/abi_types.go @@ -0,0 +1,13 @@ +package deployer + +import ( + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" +) + +// Primitive ABI types primarily used with `abi.Arguments` to pack/unpack values when calling contract methods. 
+var ( + Uint256Type = opcm.MustType("uint256") + BytesType = opcm.MustType("bytes") + AddressType = opcm.MustType("address") + Bytes32Type = opcm.MustType("bytes32") +) diff --git a/op-deployer/pkg/deployer/apply.go b/op-deployer/pkg/deployer/apply.go index f0ce4b723bcc3..80a3cc1b5b729 100644 --- a/op-deployer/pkg/deployer/apply.go +++ b/op-deployer/pkg/deployer/apply.go @@ -316,6 +316,7 @@ func ApplyPipeline( opts.Logger, deployer, bundle.L1, + script.WithNoMaxCodeSize(), // Allow unoptimized contracts from the forge lite profile in genesis deployments ) if err != nil { return fmt.Errorf("failed to create L1 script host: %w", err) diff --git a/op-deployer/pkg/deployer/broadcaster/gas_estimator.go b/op-deployer/pkg/deployer/broadcaster/gas_estimator.go index b04390fc8aa78..4390678b3d070 100644 --- a/op-deployer/pkg/deployer/broadcaster/gas_estimator.go +++ b/op-deployer/pkg/deployer/broadcaster/gas_estimator.go @@ -16,6 +16,9 @@ var ( // dummyBlobFee is a dummy value for the blob fee. Since this gas estimator will never // post blobs, it's just set to 1. dummyBlobFee = big.NewInt(1) + // dummyBlobTipCap is a dummy value for the blob tip cap. Since this gas estimator will never + // post blobs, it's just set to 0. + dummyBlobTipCap = big.NewInt(0) // maxTip is the maximum tip that can be suggested by this estimator. maxTip = big.NewInt(50 * 1e9) // minTip is the minimum tip that can be suggested by this estimator. @@ -25,15 +28,15 @@ var ( // DeployerGasPriceEstimator is a custom gas price estimator for use with op-deployer. // It pads the base fee by 50% and multiplies the suggested tip by 5 up to a max of // 50 gwei. 
-func DeployerGasPriceEstimator(ctx context.Context, client txmgr.ETHBackend) (*big.Int, *big.Int, *big.Int, error) { +func DeployerGasPriceEstimator(ctx context.Context, client txmgr.ETHBackend) (*big.Int, *big.Int, *big.Int, *big.Int, error) { chainHead, err := client.HeaderByNumber(ctx, nil) if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get block: %w", err) + return nil, nil, nil, nil, fmt.Errorf("failed to get block: %w", err) } tip, err := client.SuggestGasTipCap(ctx) if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get gas tip cap: %w", err) + return nil, nil, nil, nil, fmt.Errorf("failed to get gas tip cap: %w", err) } baseFeePad := new(big.Int).Div(chainHead.BaseFee, baseFeePadFactor) @@ -48,5 +51,5 @@ func DeployerGasPriceEstimator(ctx context.Context, client txmgr.ETHBackend) (*b paddedTip.Set(maxTip) } - return paddedTip, paddedBaseFee, dummyBlobFee, nil + return paddedTip, paddedBaseFee, dummyBlobTipCap, dummyBlobFee, nil } diff --git a/op-deployer/pkg/deployer/constants.go b/op-deployer/pkg/deployer/constants.go new file mode 100644 index 0000000000000..02153ef600355 --- /dev/null +++ b/op-deployer/pkg/deployer/constants.go @@ -0,0 +1,10 @@ +package deployer + +import "github.com/ethereum/go-ethereum/common" + +// Constants for Sepolia chain. +var ( + SepoliaChainID uint64 = 11155111 + DefaultL1ProxyAdminOwnerSepolia common.Address = common.HexToAddress("0x1Eb2fFc903729a0F03966B917003800b145F56E2") + DefaultSystemConfigProxySepolia common.Address = common.HexToAddress("0x034edD2A225f7f429A63E0f1D2084B9E0A93b538") +) diff --git a/op-deployer/pkg/deployer/devfeatures.go b/op-deployer/pkg/deployer/devfeatures.go index fefd390e06b9e..88993d916ef69 100644 --- a/op-deployer/pkg/deployer/devfeatures.go +++ b/op-deployer/pkg/deployer/devfeatures.go @@ -17,6 +17,9 @@ var ( // DeployV2DisputeGamesDevFlag enables deployment of V2 dispute game contracts. 
DeployV2DisputeGamesDevFlag = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000100") + + // OPCMV2DevFlag enables the OPContractsManagerV2 contract. + OPCMV2DevFlag = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000010000") ) // IsDevFeatureEnabled checks if a specific development feature is enabled in a feature bitmap. diff --git a/op-deployer/pkg/deployer/integration_test/apply_test.go b/op-deployer/pkg/deployer/integration_test/apply_test.go index b2ab4c514505c..6d8d38ab7ac6f 100644 --- a/op-deployer/pkg/deployer/integration_test/apply_test.go +++ b/op-deployer/pkg/deployer/integration_test/apply_test.go @@ -51,6 +51,7 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -169,50 +170,66 @@ func TestEndToEndBootstrapApply(t *testing.T) { func TestEndToEndBootstrapApplyWithUpgrade(t *testing.T) { op_e2e.InitParallel(t) - lgr := testlog.Logger(t, slog.LevelDebug) + tests := []struct { + name string + devFeature common.Hash + }{ + {"default", common.Hash{}}, + {"opcm-v2", deployer.OPCMV2DevFlag}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + op_e2e.InitParallel(t) + lgr := testlog.Logger(t, slog.LevelDebug) - forkedL1, stopL1, err := devnet.NewForkedSepolia(lgr) - require.NoError(t, err) - pkHex, _, _ := shared.DefaultPrivkey(t) - t.Cleanup(func() { - require.NoError(t, stopL1()) - }) - loc, afactsFS := testutil.LocalArtifacts(t) - testCacheDir := testutils.IsolatedTestDirWithAutoCleanup(t) + forkedL1, stopL1, err := devnet.NewForkedSepolia(lgr) + require.NoError(t, err) + pkHex, _, _ := shared.DefaultPrivkey(t) + t.Cleanup(func() { + require.NoError(t, 
stopL1()) + }) + loc, afactsFS := testutil.LocalArtifacts(t) + testCacheDir := testutils.IsolatedTestDirWithAutoCleanup(t) - superchain, err := standard.SuperchainFor(11155111) - require.NoError(t, err) + superchain, err := standard.SuperchainFor(11155111) + require.NoError(t, err) - superchainProxyAdmin, err := standard.SuperchainProxyAdminAddrFor(11155111) - require.NoError(t, err) + superchainProxyAdmin, err := standard.SuperchainProxyAdminAddrFor(11155111) + require.NoError(t, err) - superchainProxyAdminOwner, err := standard.L1ProxyAdminOwner(11155111) - require.NoError(t, err) + superchainProxyAdminOwner, err := standard.L1ProxyAdminOwner(11155111) + require.NoError(t, err) - cfg := bootstrap.ImplementationsConfig{ - L1RPCUrl: forkedL1.RPCUrl(), - PrivateKey: pkHex, - ArtifactsLocator: loc, - MIPSVersion: int(standard.MIPSVersion), - WithdrawalDelaySeconds: standard.WithdrawalDelaySeconds, - MinProposalSizeBytes: standard.MinProposalSizeBytes, - ChallengePeriodSeconds: standard.ChallengePeriodSeconds, - ProofMaturityDelaySeconds: standard.ProofMaturityDelaySeconds, - DisputeGameFinalityDelaySeconds: standard.DisputeGameFinalityDelaySeconds, - DevFeatureBitmap: common.Hash{}, - SuperchainConfigProxy: superchain.SuperchainConfigAddr, - ProtocolVersionsProxy: superchain.ProtocolVersionsAddr, - L1ProxyAdminOwner: superchainProxyAdminOwner, - SuperchainProxyAdmin: superchainProxyAdmin, - CacheDir: testCacheDir, - Logger: lgr, - Challenger: common.Address{'C'}, - FaultGameMaxGameDepth: standard.DisputeMaxGameDepth, - FaultGameSplitDepth: standard.DisputeSplitDepth, - FaultGameClockExtension: standard.DisputeClockExtension, - FaultGameMaxClockDuration: standard.DisputeMaxClockDuration, + cfg := bootstrap.ImplementationsConfig{ + L1RPCUrl: forkedL1.RPCUrl(), + PrivateKey: pkHex, + ArtifactsLocator: loc, + MIPSVersion: int(standard.MIPSVersion), + WithdrawalDelaySeconds: standard.WithdrawalDelaySeconds, + MinProposalSizeBytes: standard.MinProposalSizeBytes, + 
ChallengePeriodSeconds: standard.ChallengePeriodSeconds, + ProofMaturityDelaySeconds: standard.ProofMaturityDelaySeconds, + DisputeGameFinalityDelaySeconds: standard.DisputeGameFinalityDelaySeconds, + DevFeatureBitmap: tt.devFeature, + SuperchainConfigProxy: superchain.SuperchainConfigAddr, + ProtocolVersionsProxy: superchain.ProtocolVersionsAddr, + L1ProxyAdminOwner: superchainProxyAdminOwner, + SuperchainProxyAdmin: superchainProxyAdmin, + CacheDir: testCacheDir, + Logger: lgr, + Challenger: common.Address{'C'}, + FaultGameMaxGameDepth: standard.DisputeMaxGameDepth, + FaultGameSplitDepth: standard.DisputeSplitDepth, + FaultGameClockExtension: standard.DisputeClockExtension, + FaultGameMaxClockDuration: standard.DisputeMaxClockDuration, + } + if deployer.IsDevFeatureEnabled(tt.devFeature, deployer.OPCMV2DevFlag) { + cfg.DevFeatureBitmap = deployer.OPCMV2DevFlag + } + + runEndToEndBootstrapAndApplyUpgradeTest(t, afactsFS, cfg) + }) } - runEndToEndBootstrapAndApplyUpgradeTest(t, afactsFS, cfg) } func TestEndToEndApply(t *testing.T) { @@ -346,6 +363,62 @@ func TestEndToEndApply(t *testing.T) { require.True(t, exists, "Native asset liquidity predeploy should exist in L2 genesis") require.Equal(t, amount, account.Balance, "Native asset liquidity predeploy should have the configured balance") }) + + t.Run("OPCMV2 deployment", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + lgr := testlog.Logger(t, slog.LevelDebug) + l1RPC, l1Client := devnet.DefaultAnvilRPC(t, lgr) + _, pk, dk := shared.DefaultPrivkey(t) + l1ChainID := new(big.Int).SetUint64(devnet.DefaultChainID) + l2ChainID := uint256.NewInt(1) + loc, _ := testutil.LocalArtifacts(t) + testCacheDir := testutils.IsolatedTestDirWithAutoCleanup(t) + + intent, st := shared.NewIntent(t, l1ChainID, dk, l2ChainID, loc, loc, testCustomGasLimit) + + // Enable OPCMV2 dev flag + intent.GlobalDeployOverrides = map[string]any{ + "devFeatureBitmap": deployer.OPCMV2DevFlag, + } + + 
require.NoError(t, deployer.ApplyPipeline( + ctx, + deployer.ApplyPipelineOpts{ + DeploymentTarget: deployer.DeploymentTargetLive, + L1RPCUrl: l1RPC, + DeployerPrivateKey: pk, + Intent: intent, + State: st, + Logger: lgr, + StateWriter: pipeline.NoopStateWriter(), + CacheDir: testCacheDir, + }, + )) + + // Verify that OPCMV2 was deployed in implementations + require.NotEmpty(t, st.ImplementationsDeployment.OpcmV2Impl, "OPCMV2 implementation should be deployed") + require.NotEmpty(t, st.ImplementationsDeployment.OpcmContainerImpl, "OPCM container implementation should be deployed") + require.NotEmpty(t, st.ImplementationsDeployment.OpcmStandardValidatorImpl, "OPCM standard validator implementation should be deployed") + + // Verify that implementations are deployed on L1 + cg := ethClientCodeGetter(ctx, l1Client) + + opcmV2Code := cg(t, st.ImplementationsDeployment.OpcmV2Impl) + require.NotEmpty(t, opcmV2Code, "OPCMV2 should have code deployed") + + // Verify that the dev feature bitmap is set to OPCMV2 + require.Equal(t, deployer.OPCMV2DevFlag, intent.GlobalDeployOverrides["devFeatureBitmap"]) + + // Assert that the OPCM V1 addresses are zero + require.Equal(t, common.Address{}, st.ImplementationsDeployment.OpcmImpl, "OPCM V1 implementation should be zero") + require.Equal(t, common.Address{}, st.ImplementationsDeployment.OpcmContractsContainerImpl, "OPCM container implementation should be zero") + require.Equal(t, common.Address{}, st.ImplementationsDeployment.OpcmGameTypeAdderImpl, "OPCM game type adder implementation should be zero") + require.Equal(t, common.Address{}, st.ImplementationsDeployment.OpcmDeployerImpl, "OPCM deployer implementation should be zero") + require.Equal(t, common.Address{}, st.ImplementationsDeployment.OpcmUpgraderImpl, "OPCM upgrader implementation should be zero") + require.Equal(t, common.Address{}, st.ImplementationsDeployment.OpcmInteropMigratorImpl, "OPCM interop migrator implementation should be zero") + }) } func 
TestGlobalOverrides(t *testing.T) { @@ -734,7 +807,7 @@ func TestIntentConfiguration(t *testing.T) { func runEndToEndBootstrapAndApplyUpgradeTest(t *testing.T, afactsFS foundry.StatDirFs, implementationsConfig bootstrap.ImplementationsConfig) { lgr := implementationsConfig.Logger - ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 180*time.Second) defer cancel() superchainProxyAdminOwner := implementationsConfig.L1ProxyAdminOwner @@ -789,12 +862,16 @@ func runEndToEndBootstrapAndApplyUpgradeTest(t *testing.T, afactsFS foundry.Stat // Then run the OPCM upgrade t.Run("upgrade opcm", func(t *testing.T) { + if deployer.IsDevFeatureEnabled(implementationsConfig.DevFeatureBitmap, deployer.OPCMV2DevFlag) { + t.Skip("Skipping OPCM upgrade for OPCM V2") + return + } upgradeConfig := embedded.UpgradeOPChainInput{ Prank: superchainProxyAdminOwner, Opcm: impls.Opcm, - EncodedChainConfigs: []embedded.OPChainConfig{ + ChainConfigs: []embedded.OPChainConfig{ { - SystemConfigProxy: common.HexToAddress("034edD2A225f7f429A63E0f1D2084B9E0A93b538"), + SystemConfigProxy: deployer.DefaultSystemConfigProxySepolia, CannonPrestate: common.Hash{'C', 'A', 'N', 'N', 'O', 'N'}, CannonKonaPrestate: common.Hash{'K', 'O', 'N', 'A'}, }, @@ -806,6 +883,187 @@ func runEndToEndBootstrapAndApplyUpgradeTest(t *testing.T, afactsFS foundry.Stat err = embedded.DefaultUpgrader.Upgrade(host, upgradeConfigBytes) require.NoError(t, err, "OPCM upgrade should succeed") }) + t.Run("upgrade opcm v2", func(t *testing.T) { + if !deployer.IsDevFeatureEnabled(implementationsConfig.DevFeatureBitmap, deployer.OPCMV2DevFlag) { + t.Skip("Skipping OPCM V2 upgrade for non-OPCM V2 dev feature") + return + } + require.NotEqual(t, common.Address{}, impls.OpcmV2, "OpcmV2 address should not be zero") + t.Logf("Using OpcmV2 at address: %s", impls.OpcmV2.Hex()) + t.Logf("Using OpcmUtils at address: %s", impls.OpcmUtils.Hex()) + t.Logf("Using 
OpcmContainer at address: %s", impls.OpcmContainer.Hex()) + + // Verify OPCM V2 has code deployed + opcmCode, err := versionClient.CodeAt(ctx, impls.OpcmV2, nil) + require.NoError(t, err) + require.NotEmpty(t, opcmCode, "OPCM V2 should have code deployed") + t.Logf("OPCM V2 code size: %d bytes", len(opcmCode)) + + // Verify OpcmUtils has code deployed + utilsCode, err := versionClient.CodeAt(ctx, impls.OpcmUtils, nil) + require.NoError(t, err) + require.NotEmpty(t, utilsCode, "OpcmUtils should have code deployed") + t.Logf("OpcmUtils code size: %d bytes", len(utilsCode)) + + // Verify OpcmContainer has code deployed + containerCode, err := versionClient.CodeAt(ctx, impls.OpcmContainer, nil) + require.NoError(t, err) + require.NotEmpty(t, containerCode, "OpcmContainer should have code deployed") + t.Logf("OpcmContainer code size: %d bytes", len(containerCode)) + + // First, upgrade the superchain with V2 + t.Run("upgrade superchain v2", func(t *testing.T) { + superchainUpgradeConfig := embedded.UpgradeSuperchainConfigInput{ + Prank: superchainProxyAdminOwner, + Opcm: impls.OpcmV2, + SuperchainConfig: implementationsConfig.SuperchainConfigProxy, + ExtraInstructions: []embedded.ExtraInstruction{}, + } + err := embedded.UpgradeSuperchainConfig(host, superchainUpgradeConfig) + if err != nil { + t.Logf("Superchain upgrade may have failed (could already be upgraded): %v", err) + } else { + t.Log("Superchain V2 upgrade succeeded") + } + }) + + // Then test upgrade on the V2-deployed chain + t.Run("upgrade chain v2", func(t *testing.T) { + // ABI-encode game args for FaultDisputeGameConfig{absolutePrestate} + bytes32Type := deployer.Bytes32Type + addressType := deployer.AddressType + + // FaultDisputeGameConfig just needs absolutePrestate (bytes32) + testPrestate := common.Hash{'P', 'R', 'E', 'S', 'T', 'A', 'T', 'E'} + cannonArgs, err := abi.Arguments{{Type: bytes32Type}}.Pack(testPrestate) + require.NoError(t, err) + + // PermissionedDisputeGameConfig needs 
absolutePrestate, proposer, challenger + testProposer := common.Address{'P'} + testChallenger := common.Address{'C'} + permissionedArgs, err := abi.Arguments{ + {Type: bytes32Type}, + {Type: addressType}, + {Type: addressType}, + }.Pack(testPrestate, testProposer, testChallenger) + require.NoError(t, err) + + upgradeConfig := embedded.UpgradeOPChainInput{ + Prank: superchainProxyAdminOwner, + Opcm: impls.OpcmV2, + UpgradeInputV2: &embedded.UpgradeInputV2{ + SystemConfig: deployer.DefaultSystemConfigProxySepolia, + DisputeGameConfigs: []embedded.DisputeGameConfig{ + { + Enabled: true, + InitBond: big.NewInt(1000000000000000000), + GameType: embedded.GameTypeCannon, + GameArgs: cannonArgs, + }, + { + Enabled: true, + InitBond: big.NewInt(1000000000000000000), + GameType: embedded.GameTypePermissionedCannon, + GameArgs: permissionedArgs, + }, + { + Enabled: false, + InitBond: big.NewInt(0), + GameType: embedded.GameTypeCannonKona, + GameArgs: []byte{}, // Disabled games don't need args + }, + }, + ExtraInstructions: []embedded.ExtraInstruction{ + { + Key: "PermittedProxyDeployment", + Data: []byte("DelayedWETH"), + }, + // TODO(#18502): Remove the extra instruction for custom gas token after U18 ships. 
+ { + Key: "overrides.cfg.useCustomGasToken", + Data: make([]byte, 32), + }, + }, + }, + } + + upgradeConfigBytes, err := json.Marshal(upgradeConfig) + require.NoError(t, err, "UpgradeOPChainV2Input should marshal to JSON") + + // Verify input encoding + encodedData, err := upgradeConfig.EncodedUpgradeInputV2() + require.NoError(t, err, "Should encode UpgradeInputV2") + require.NotEmpty(t, encodedData, "Encoded data should not be empty") + + // Build expected hex encoding + // Structure breakdown: + // - Tuple offset (0x20) + // - SystemConfig address (0x034edd2a225f7f429a63e0f1d2084b9e0a93b538) + // - DisputeGameConfigs array offset (0x60) and ExtraInstructions array offset (0x340) + // - DisputeGameConfigs[]: 3 configs + // [0] Cannon: enabled=true, initBond=1e18, gameType=0, gameArgs="PRESTATE" + // [1] PermissionedCannon: enabled=true, initBond=1e18, gameType=1, gameArgs="PRESTATE"+proposer+challenger + // [2] CannonKona: enabled=false, initBond=0, gameType=0, gameArgs=empty + // - ExtraInstructions[]: 2 instructions + // [0] key="PermittedProxyDeployment", data="DelayedWETH" + // [1] key="overrides.cfg.useCustomGasToken", data=32 zero bytes + expected := "0000000000000000000000000000000000000000000000000000000000000020" + // offset to tuple + "000000000000000000000000034edd2a225f7f429a63e0f1d2084b9e0a93b538" + // systemConfig address + "0000000000000000000000000000000000000000000000000000000000000060" + // offset to disputeGameConfigs + "0000000000000000000000000000000000000000000000000000000000000340" + // offset to extraInstructions + "0000000000000000000000000000000000000000000000000000000000000003" + // disputeGameConfigs.length (3) + "0000000000000000000000000000000000000000000000000000000000000060" + // offset to disputeGameConfigs[0] + "0000000000000000000000000000000000000000000000000000000000000120" + // offset to disputeGameConfigs[1] + "0000000000000000000000000000000000000000000000000000000000000220" + // offset to disputeGameConfigs[2] + // 
DisputeGameConfigs[0] - Cannon + "0000000000000000000000000000000000000000000000000000000000000001" + // enabled=true + "0000000000000000000000000000000000000000000000000de0b6b3a7640000" + // initBond=1e18 + "0000000000000000000000000000000000000000000000000000000000000000" + // gameType=0 (Cannon) + "0000000000000000000000000000000000000000000000000000000000000080" + // offset to gameArgs + "0000000000000000000000000000000000000000000000000000000000000020" + // gameArgs.length (32 bytes) + "5052455354415445000000000000000000000000000000000000000000000000" + // gameArgs data "PRESTATE" + // DisputeGameConfigs[1] - PermissionedCannon + "0000000000000000000000000000000000000000000000000000000000000001" + // enabled=true + "0000000000000000000000000000000000000000000000000de0b6b3a7640000" + // initBond=1e18 + "0000000000000000000000000000000000000000000000000000000000000001" + // gameType=1 (PermissionedCannon) + "0000000000000000000000000000000000000000000000000000000000000080" + // offset to gameArgs + "0000000000000000000000000000000000000000000000000000000000000060" + // gameArgs.length (96 bytes) + "5052455354415445000000000000000000000000000000000000000000000000" + // gameArgs data "PRESTATE" + "0000000000000000000000005000000000000000000000000000000000000000" + // proposer address + "0000000000000000000000004300000000000000000000000000000000000000" + // challenger address + // DisputeGameConfigs[2] - CannonKona (disabled) + "0000000000000000000000000000000000000000000000000000000000000000" + // enabled=false + "0000000000000000000000000000000000000000000000000000000000000000" + // initBond=0 + "0000000000000000000000000000000000000000000000000000000000000008" + // gameType=8 (CannonKona) + "0000000000000000000000000000000000000000000000000000000000000080" + // offset to gameArgs + "0000000000000000000000000000000000000000000000000000000000000000" + // gameArgs.length (0) + // ExtraInstructions array + 
"0000000000000000000000000000000000000000000000000000000000000002" + // extraInstructions.length (2) + "0000000000000000000000000000000000000000000000000000000000000040" + // offset to extraInstructions[0] + "0000000000000000000000000000000000000000000000000000000000000100" + // offset to extraInstructions[1] + // ExtraInstructions[0] - PermittedProxyDeployment + "0000000000000000000000000000000000000000000000000000000000000040" + // offset to key + "0000000000000000000000000000000000000000000000000000000000000080" + // offset to data + "0000000000000000000000000000000000000000000000000000000000000018" + // key.length (24 bytes) + "5065726d697474656450726f78794465706c6f796d656e74000000000000000" + // "PermittedProxyDeployment" + "0" + // padding + "000000000000000000000000000000000000000000000000000000000000000b" + // data.length (11 bytes) + "44656c617965645745544800000000000000000000000000000000000000000" + // "DelayedWETH" + "0" + // padding + // ExtraInstructions[1] - useCustomGasToken override + "0000000000000000000000000000000000000000000000000000000000000040" + // offset to key + "0000000000000000000000000000000000000000000000000000000000000080" + // offset to data + "000000000000000000000000000000000000000000000000000000000000001f" + // key.length (31 bytes) + "6f76657272696465732e6366672e757365437573746f6d476173546f6b656e00" + // "overrides.cfg.useCustomGasToken" + "0000000000000000000000000000000000000000000000000000000000000020" + // data.length (32 bytes) + "0000000000000000000000000000000000000000000000000000000000000000" // data (32 zero bytes) + + require.Equal(t, expected, hex.EncodeToString(encodedData), "Encoded calldata should match expected structure") + + err = embedded.DefaultUpgrader.Upgrade(host, upgradeConfigBytes) + require.NoError(t, err, "OPCM V2 chain upgrade should succeed") + }) + }) }) } diff --git a/op-deployer/pkg/deployer/integration_test/cli/manage_add_game_type_v2_test.go 
b/op-deployer/pkg/deployer/integration_test/cli/manage_add_game_type_v2_test.go new file mode 100644 index 0000000000000..a90bafc1f00de --- /dev/null +++ b/op-deployer/pkg/deployer/integration_test/cli/manage_add_game_type_v2_test.go @@ -0,0 +1,315 @@ +package cli + +import ( + "context" + "encoding/hex" + "encoding/json" + "log/slog" + "math/big" + "os" + "path/filepath" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/bootstrap" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/upgrade/embedded" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum-optimism/optimism/op-service/testutils/devnet" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" +) + +func TestManageAddGameTypeV2_CLI(t *testing.T) { + t.Run("missing required flag --config", func(t *testing.T) { + runner := NewCLITestRunnerWithNetwork(t) + runner.ExpectErrorContains(t, []string{ + "manage", "add-game-type-v2", + "--l1-rpc-url", runner.l1RPC, + }, nil, "missing required flag: config") + }) + + t.Run("missing required flag --l1-rpc-url", func(t *testing.T) { + runner := NewCLITestRunner(t) + workDir := runner.GetWorkDir() + configFile := filepath.Join(workDir, "config.json") + + // Create a minimal valid config file + config := embedded.UpgradeOPChainInput{ + Prank: common.Address{0x01}, + Opcm: common.Address{0x02}, + UpgradeInputV2: &embedded.UpgradeInputV2{ + SystemConfig: common.Address{0x03}, + DisputeGameConfigs: 
[]embedded.DisputeGameConfig{}, + ExtraInstructions: []embedded.ExtraInstruction{}, + }, + } + configData, err := json.Marshal(config) + require.NoError(t, err) + require.NoError(t, os.WriteFile(configFile, configData, 0o644)) + + runner.ExpectErrorContains(t, []string{ + "manage", "add-game-type-v2", + "--config", configFile, + }, nil, "missing required flag: l1-rpc-url") + }) + + t.Run("invalid config file path", func(t *testing.T) { + runner := NewCLITestRunnerWithNetwork(t) + runner.ExpectErrorContains(t, []string{ + "manage", "add-game-type-v2", + "--config", "/nonexistent/path/config.json", + "--l1-rpc-url", runner.l1RPC, + }, nil, "failed to read config file") + }) + + t.Run("invalid JSON config file", func(t *testing.T) { + runner := NewCLITestRunnerWithNetwork(t) + workDir := runner.GetWorkDir() + configFile := filepath.Join(workDir, "invalid_config.json") + + // Write invalid JSON + require.NoError(t, os.WriteFile(configFile, []byte("{invalid json}"), 0o644)) + + runner.ExpectErrorContains(t, []string{ + "manage", "add-game-type-v2", + "--config", configFile, + "--l1-rpc-url", runner.l1RPC, + }, nil, "failed to upgrade") + }) + + t.Run("config file missing required fields", func(t *testing.T) { + runner := NewCLITestRunnerWithNetwork(t) + workDir := runner.GetWorkDir() + configFile := filepath.Join(workDir, "incomplete_config.json") + + // Create config missing prank or opcm + config := map[string]interface{}{ + "prank": common.Address{0x01}.Hex(), + // Missing opcm + } + configData, err := json.Marshal(config) + require.NoError(t, err) + require.NoError(t, os.WriteFile(configFile, configData, 0o644)) + + runner.ExpectErrorContains(t, []string{ + "manage", "add-game-type-v2", + "--config", configFile, + "--l1-rpc-url", runner.l1RPC, + }, nil, "failed to upgrade") + }) +} + +// Tests the manage add-game-type-v2 command, from the CLI to the actual contract execution through the Solidity scripts. 
+func TestManageAddGameTypeV2_Integration(t *testing.T) { + // TODO(#18718): Update this to use an actual deployed OPCM V2 contract once we have one. + // For now, we manually deploy the OPCM V2 contract using bootstrap.Implementations. + lgr := testlog.Logger(t, slog.LevelDebug) + + l1Rpc, stopL1, err := devnet.NewForkedSepolia(lgr) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, stopL1()) + }) + runner := NewCLITestRunnerWithNetwork(t, WithL1RPC(l1Rpc.RPCUrl())) + workDir := runner.GetWorkDir() + + // Test values - using arbitrary addresses for testing + l1ProxyAdminOwner := deployer.DefaultL1ProxyAdminOwnerSepolia + systemConfigProxy := deployer.DefaultSystemConfigProxySepolia + + // Deploy the OPCM V2 contract. + opcmV2 := deployDependencies(t, runner) + + bytes32Type := deployer.Bytes32Type + addressType := deployer.AddressType + + // FaultDisputeGameConfig just needs absolutePrestate (bytes32) + testPrestate := common.Hash{'P', 'R', 'E', 'S', 'T', 'A', 'T', 'E'} + cannonArgs, err := abi.Arguments{{Type: bytes32Type}}.Pack(testPrestate) + require.NoError(t, err) + + // PermissionedDisputeGameConfig needs absolutePrestate, proposer, challenger + testProposer := common.Address{'P'} + testChallenger := common.Address{'C'} + permissionedArgs, err := abi.Arguments{ + {Type: bytes32Type}, + {Type: addressType}, + {Type: addressType}, + }.Pack(testPrestate, testProposer, testChallenger) + require.NoError(t, err) + + testConfig := embedded.UpgradeOPChainInput{ + Prank: l1ProxyAdminOwner, + Opcm: opcmV2, + UpgradeInputV2: &embedded.UpgradeInputV2{ + SystemConfig: systemConfigProxy, + DisputeGameConfigs: []embedded.DisputeGameConfig{ + { + Enabled: true, + InitBond: big.NewInt(1000000000000000000), + GameType: embedded.GameTypeCannon, + GameArgs: cannonArgs, + }, + { + Enabled: true, + InitBond: big.NewInt(1000000000000000000), + GameType: embedded.GameTypePermissionedCannon, + GameArgs: permissionedArgs, + }, + { + Enabled: false, + InitBond: 
big.NewInt(0), + GameType: embedded.GameTypeCannonKona, + GameArgs: []byte{}, // Disabled games don't need args + }, + }, + ExtraInstructions: []embedded.ExtraInstruction{ + { + Key: "PermittedProxyDeployment", + Data: []byte("DelayedWETH"), + }, + { + // TODO(#18502): Remove this extra instruction after U18 ships. + Key: "overrides.cfg.useCustomGasToken", + Data: make([]byte, 32), + }, + }, + }, + } + + configFile := filepath.Join(workDir, "add_game_type_v2_config.json") + outputFile := filepath.Join(workDir, "add_game_type_v2_output.json") + + configData, err := json.MarshalIndent(testConfig, "", " ") + require.NoError(t, err) + require.NoError(t, os.WriteFile(configFile, configData, 0o644)) + + // Run the CLI command + output := runner.ExpectSuccess(t, []string{ + "manage", "add-game-type-v2", + "--config", configFile, + "--l1-rpc-url", runner.l1RPC, + "--outfile", outputFile, + }, nil) + + t.Logf("Command output (logs):\n%s", output) + + // Verify output file was created + require.FileExists(t, outputFile) + data, err := os.ReadFile(outputFile) + require.NoError(t, err) + + // Verify the file is not empty + require.NotEmpty(t, data, "output file should not be empty") + + // Verify the file contains valid JSON + require.True(t, json.Valid(data), "output file should contain valid JSON") + + // Verify the JSON can be unmarshaled into the expected structure + var dump []broadcaster.CalldataDump + require.NoError(t, json.Unmarshal(data, &dump)) + + t.Logf("Add game type v2 generated calldata: %s", string(data)) + + // Verify the calldata structure + require.Len(t, dump, 1) + require.Equal(t, l1ProxyAdminOwner.Hex(), dump[0].To.Hex(), "calldata should be sent to prank address") + + // Verify the calldata has the correct function selector for opcm.upgrade + // The selector for `upgrade((address,(bool,uint256,uint32,bytes)[],(string,bytes)[]))` is 0x8a847e2e + calldata := dump[0].Data + require.GreaterOrEqual(t, len(calldata), 4, "calldata should be at least 4 bytes 
for function selector") + + expectedSelector := common.FromHex("8a847e2e") + actualSelector := calldata[:4] + require.Equal(t, hex.EncodeToString(expectedSelector), hex.EncodeToString(actualSelector), + "calldata should contain opcmV2.upgrade function selector 0x8a847e2e, got: %s", hex.EncodeToString(actualSelector)) + + // Verify the calldata contains the correct upgrade input + // We construct the expected calldata from testConfig + expectedEncodedParams, err := testConfig.EncodedUpgradeInputV2() + require.NoError(t, err, "failed to encode expected upgrade input") + + // Construct expected calldata: function selector + encoded parameters + expectedCalldata := append(expectedSelector, expectedEncodedParams...) + + // Compare the full calldata (excluding the selector which we already verified) + require.Equal(t, len(expectedCalldata), len(calldata), + "calldata length mismatch: expected %d bytes, got %d bytes", len(expectedCalldata), len(calldata)) + + // Compare the encoded parameters (skip the 4-byte selector) + require.Equal(t, hex.EncodeToString(expectedEncodedParams), hex.EncodeToString(calldata[4:]), + "encoded upgrade input parameters do not match expected values") + + // Verify To is the prank address + require.Equal(t, l1ProxyAdminOwner.Hex(), dump[0].To.Hex(), "calldata should be sent to prank address") +} + +// TODO(#18718): Remove this once we have a deployed OPCM V2 contract. 
+// deployDependencies deploys the superchain contracts and OPCM V2 implementation +// using the DeployImplementations script, and returns the OPCM V2 address +func deployDependencies(t *testing.T, runner *CLITestRunner) common.Address { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + testCacheDir := testutils.IsolatedTestDirWithAutoCleanup(t) + + // First, deploy superchain contracts (required for OPCM deployment) + superchainProxyAdminOwner := common.Address{'S'} + superchainOut, err := bootstrap.Superchain(ctx, bootstrap.SuperchainConfig{ + L1RPCUrl: runner.l1RPC, + PrivateKey: runner.privateKeyHex, + ArtifactsLocator: artifacts.EmbeddedLocator, + Logger: runner.lgr, + SuperchainProxyAdminOwner: superchainProxyAdminOwner, + ProtocolVersionsOwner: common.Address{'P'}, + Guardian: common.Address{'G'}, + Paused: false, + RequiredProtocolVersion: params.ProtocolVersionV0{Major: 1}.Encode(), + RecommendedProtocolVersion: params.ProtocolVersionV0{Major: 2}.Encode(), + CacheDir: testCacheDir, + }) + require.NoError(t, err, "Failed to deploy superchain contracts") + + // Deploy implementations with OPCM V2 enabled + implOut, err := bootstrap.Implementations(ctx, bootstrap.ImplementationsConfig{ + L1RPCUrl: runner.l1RPC, + PrivateKey: runner.privateKeyHex, + ArtifactsLocator: artifacts.EmbeddedLocator, + Logger: runner.lgr, + WithdrawalDelaySeconds: standard.WithdrawalDelaySeconds, + MinProposalSizeBytes: standard.MinProposalSizeBytes, + ChallengePeriodSeconds: standard.ChallengePeriodSeconds, + ProofMaturityDelaySeconds: standard.ProofMaturityDelaySeconds, + DisputeGameFinalityDelaySeconds: standard.DisputeGameFinalityDelaySeconds, + MIPSVersion: int(standard.MIPSVersion), + DevFeatureBitmap: deployer.OPCMV2DevFlag, // Enable OPCM V2 + SuperchainConfigProxy: superchainOut.SuperchainConfigProxy, + ProtocolVersionsProxy: superchainOut.ProtocolVersionsProxy, + SuperchainProxyAdmin: superchainOut.SuperchainProxyAdmin, + 
L1ProxyAdminOwner: superchainProxyAdminOwner, + Challenger: common.Address{'C'}, + CacheDir: testCacheDir, + FaultGameMaxGameDepth: standard.DisputeMaxGameDepth, + FaultGameSplitDepth: standard.DisputeSplitDepth, + FaultGameClockExtension: standard.DisputeClockExtension, + FaultGameMaxClockDuration: standard.DisputeMaxClockDuration, + }) + require.NoError(t, err, "Failed to deploy implementations") + + // Verify OPCM V2 was deployed + require.NotEqual(t, common.Address{}, implOut.OpcmV2, "OPCM V2 address should be set") + require.Equal(t, common.Address{}, implOut.Opcm, "OPCM V1 address should be zero when V2 is deployed") + + t.Logf("Deployed OPCM V2 at address: %s", implOut.OpcmV2.Hex()) + t.Logf("SuperchainConfigProxy: %s", superchainOut.SuperchainConfigProxy.Hex()) + + return implOut.OpcmV2 +} diff --git a/op-deployer/pkg/deployer/manage/add_game_type.go b/op-deployer/pkg/deployer/manage/add_game_type.go index 5ee3d36ed29bb..86af9d870c825 100644 --- a/op-deployer/pkg/deployer/manage/add_game_type.go +++ b/op-deployer/pkg/deployer/manage/add_game_type.go @@ -12,6 +12,8 @@ import ( "github.com/ethereum-optimism/optimism/op-service/cliutil" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/pipeline" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/upgrade" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/upgrade/embedded" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" @@ -297,3 +299,10 @@ func AddGameType(ctx context.Context, cfg AddGameTypeConfig) (opcm.AddGameTypeOu return output, calldata, nil } + +// AddGameTypeV2CLI is the CLI command for adding a new game type to the chain using the OPContractsManager V2 +// This command is just an alias for the upgrade command with the default upgrader, therefore users can perform V1 upgrades +// through it. 
+func AddGameTypeOPCMV2CLI(cliCtx *cli.Context) error { + return upgrade.UpgradeCLI(embedded.DefaultUpgrader)(cliCtx) +} diff --git a/op-deployer/pkg/deployer/manage/flags.go b/op-deployer/pkg/deployer/manage/flags.go index 1f3185d780ed4..b678b1fc04015 100644 --- a/op-deployer/pkg/deployer/manage/flags.go +++ b/op-deployer/pkg/deployer/manage/flags.go @@ -3,6 +3,7 @@ package manage import ( "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/upgrade" oplog "github.com/ethereum-optimism/optimism/op-service/log" "github.com/urfave/cli/v2" ) @@ -163,6 +164,18 @@ var Commands = cli.Commands{ }, oplog.CLIFlags(deployer.EnvVarPrefix)...), Action: AddGameTypeCLI, }, + &cli.Command{ + Name: "add-game-type-v2", + Usage: "allows to add new game types to the chain using the OPContractsManager V2", + Flags: append([]cli.Flag{ + deployer.L1RPCURLFlag, + upgrade.ConfigFlag, + upgrade.OverrideArtifactsURLFlag, + upgrade.OutfileFlag, + deployer.CacheDirFlag, + }, oplog.CLIFlags(deployer.EnvVarPrefix)...), + Action: AddGameTypeOPCMV2CLI, + }, &cli.Command{ Name: "migrate", Usage: "Migrates the chain to use superproofs", diff --git a/op-deployer/pkg/deployer/opcm/asterisc.go b/op-deployer/pkg/deployer/opcm/asterisc.go deleted file mode 100644 index 01f3bb7cbdd48..0000000000000 --- a/op-deployer/pkg/deployer/opcm/asterisc.go +++ /dev/null @@ -1,21 +0,0 @@ -package opcm - -import ( - "github.com/ethereum-optimism/optimism/op-chain-ops/script" - "github.com/ethereum/go-ethereum/common" -) - -type DeployAsteriscInput struct { - PreimageOracle common.Address -} - -type DeployAsteriscOutput struct { - AsteriscSingleton common.Address -} - -type DeployAsteriscScript script.DeployScriptWithOutput[DeployAsteriscInput, DeployAsteriscOutput] - -// NewDeployAsteriscScript loads and validates the DeployAsterisc script contract -func 
NewDeployAsteriscScript(host *script.Host) (DeployAsteriscScript, error) { - return script.NewDeployScriptWithOutputFromFile[DeployAsteriscInput, DeployAsteriscOutput](host, "DeployAsterisc.s.sol", "DeployAsterisc") -} diff --git a/op-deployer/pkg/deployer/opcm/asterisc_test.go b/op-deployer/pkg/deployer/opcm/asterisc_test.go deleted file mode 100644 index 79ce60e81925a..0000000000000 --- a/op-deployer/pkg/deployer/opcm/asterisc_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package opcm - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestNewDeployAsteriscScript(t *testing.T) { - t.Run("should not fail with current version of DeployAsterisc contract", func(t *testing.T) { - // First we grab a test host - host1 := createTestHost(t) - - // Then we load the script - // - // This would raise an error if the Go types didn't match the ABI - deploySuperchain, err := NewDeployAsteriscScript(host1) - require.NoError(t, err) - - // Then we deploy - output, err := deploySuperchain.Run(DeployAsteriscInput{ - PreimageOracle: common.BigToAddress(big.NewInt(1)), - }) - - // And do some simple asserts - require.NoError(t, err) - require.NotNil(t, output) - }) -} diff --git a/op-deployer/pkg/deployer/opcm/contract.go b/op-deployer/pkg/deployer/opcm/contract.go index b928c49e132bc..b4175ae86f618 100644 --- a/op-deployer/pkg/deployer/opcm/contract.go +++ b/op-deployer/pkg/deployer/opcm/contract.go @@ -41,7 +41,7 @@ func (c *Contract) GetAddressByNameViaAddressManager(ctx context.Context, name s inputs := abi.Arguments{ abi.Argument{ Name: "_name", - Type: mustType("string"), + Type: MustType("string"), Indexed: false, }, } @@ -64,7 +64,7 @@ func (c *Contract) callContractMethod(ctx context.Context, methodName string, in abi.Arguments{ abi.Argument{ Name: "address", - Type: mustType("address"), + Type: MustType("address"), Indexed: false, }, }, @@ -98,7 +98,7 @@ func (c *Contract) callContractMethod(ctx 
context.Context, methodName string, in return addr, nil } -func mustType(t string) abi.Type { +func MustType(t string) abi.Type { typ, err := abi.NewType(t, "", nil) if err != nil { panic(err) diff --git a/op-deployer/pkg/deployer/opcm/dispute_game_test.go b/op-deployer/pkg/deployer/opcm/dispute_game_test.go index 0c51c89c0e66a..6e1ba4f296016 100644 --- a/op-deployer/pkg/deployer/opcm/dispute_game_test.go +++ b/op-deployer/pkg/deployer/opcm/dispute_game_test.go @@ -68,10 +68,10 @@ func deployDisputeGameScriptVM(t *testing.T, host *script.Host) common.Address { preimageOracleAddress, err := host.Create(addresses.ScriptDeployer, append(preimageOracleArtifact.Bytecode.Object, encodedPreimageOracleConstructor...)) require.NoError(t, err) - bigStepperArtifact, err := host.Artifacts().ReadArtifact("RISCV.sol", "RISCV") + bigStepperArtifact, err := host.Artifacts().ReadArtifact("MIPS64.sol", "MIPS64") require.NoError(t, err) - encodedBigStepperConstructor, err := bigStepperArtifact.ABI.Pack("", preimageOracleAddress) + encodedBigStepperConstructor, err := bigStepperArtifact.ABI.Pack("", preimageOracleAddress, new(big.Int).SetUint64(standard.MIPSVersion)) require.NoError(t, err) bigStepperAddress, err := host.Create(addresses.ScriptDeployer, append(bigStepperArtifact.Bytecode.Object, encodedBigStepperConstructor...)) diff --git a/op-deployer/pkg/deployer/opcm/implementations.go b/op-deployer/pkg/deployer/opcm/implementations.go index 07c367cc1d8c6..5438a2f2758d1 100644 --- a/op-deployer/pkg/deployer/opcm/implementations.go +++ b/op-deployer/pkg/deployer/opcm/implementations.go @@ -36,6 +36,7 @@ type DeployImplementationsOutput struct { OpcmInteropMigrator common.Address `json:"opcmInteropMigratorAddress"` OpcmStandardValidator common.Address `json:"opcmStandardValidatorAddress"` OpcmUtils common.Address `json:"opcmUtilsAddress"` + OpcmMigrator common.Address `json:"opcmMigratorAddress"` OpcmV2 common.Address `json:"opcmV2Address"` OpcmContainer common.Address 
`json:"opcmContainerAddress"` DelayedWETHImpl common.Address `json:"delayedWETHImplAddress"` diff --git a/op-deployer/pkg/deployer/opcm/opchain.go b/op-deployer/pkg/deployer/opcm/opchain.go index 115bb14f9af2e..a0298c71b6654 100644 --- a/op-deployer/pkg/deployer/opcm/opchain.go +++ b/op-deployer/pkg/deployer/opcm/opchain.go @@ -43,6 +43,7 @@ type DeployOPChainInput struct { OperatorFeeScalar uint32 OperatorFeeConstant uint64 + SuperchainConfig common.Address UseCustomGasToken bool } diff --git a/op-deployer/pkg/deployer/opcm/scripts.go b/op-deployer/pkg/deployer/opcm/scripts.go index 2ea9fe0be3a96..120b41155a596 100644 --- a/op-deployer/pkg/deployer/opcm/scripts.go +++ b/op-deployer/pkg/deployer/opcm/scripts.go @@ -10,7 +10,6 @@ import ( type Scripts struct { DeployAlphabetVM DeployAlphabetVMScript DeployAltDA DeployAltDAScript - DeployAsterisc DeployAsteriscScript DeployDisputeGame DeployDisputeGameScript DeployImplementations DeployImplementationsScript DeployMIPS DeployMIPSScript @@ -41,11 +40,6 @@ func NewScripts(host *script.Host) (*Scripts, error) { return nil, fmt.Errorf("failed to load DeployAltDA script: %w", err) } - deployAsterisc, err := NewDeployAsteriscScript(host) - if err != nil { - return nil, fmt.Errorf("failed to load DeployAsterisc script: %w", err) - } - deployDisputeGame, err := NewDeployDisputeGameScript(host) if err != nil { return nil, fmt.Errorf("failed to load DeployDisputeGame script: %w", err) @@ -64,7 +58,6 @@ func NewScripts(host *script.Host) (*Scripts, error) { return &Scripts{ DeployAlphabetVM: deployAlphabetVM, DeployAltDA: deployAltDA, - DeployAsterisc: deployAsterisc, DeployDisputeGame: deployDisputeGame, DeployMIPS: deployMIPSScript, DeployImplementations: deployImplementations, diff --git a/op-deployer/pkg/deployer/opcm/scripts_test.go b/op-deployer/pkg/deployer/opcm/scripts_test.go index e0826eb1005a7..dc563e5f4f159 100644 --- a/op-deployer/pkg/deployer/opcm/scripts_test.go +++ b/op-deployer/pkg/deployer/opcm/scripts_test.go 
@@ -22,7 +22,6 @@ func TestNewScripts(t *testing.T) { require.NotNil(t, scripts.DeploySuperchain) require.NotNil(t, scripts.DeployAlphabetVM) require.NotNil(t, scripts.DeployAltDA) - require.NotNil(t, scripts.DeployAsterisc) require.NotNil(t, scripts.DeployDisputeGame) require.NotNil(t, scripts.DeployMIPS) }) diff --git a/op-deployer/pkg/deployer/pipeline/implementations.go b/op-deployer/pkg/deployer/pipeline/implementations.go index 138186f905af2..e3a01d14dfb23 100644 --- a/op-deployer/pkg/deployer/pipeline/implementations.go +++ b/op-deployer/pkg/deployer/pipeline/implementations.go @@ -74,6 +74,8 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro OpcmUpgraderImpl: dio.OpcmUpgrader, OpcmInteropMigratorImpl: dio.OpcmInteropMigrator, OpcmStandardValidatorImpl: dio.OpcmStandardValidator, + OpcmV2Impl: dio.OpcmV2, + OpcmContainerImpl: dio.OpcmContainer, DelayedWethImpl: dio.DelayedWETHImpl, OptimismPortalImpl: dio.OptimismPortalImpl, OptimismPortalInteropImpl: dio.OptimismPortalInteropImpl, diff --git a/op-deployer/pkg/deployer/pipeline/opchain.go b/op-deployer/pkg/deployer/pipeline/opchain.go index 2ef41d73d609e..5fad788d25e9e 100644 --- a/op-deployer/pkg/deployer/pipeline/opchain.go +++ b/op-deployer/pkg/deployer/pipeline/opchain.go @@ -104,6 +104,15 @@ func makeDCI(intent *state.Intent, thisIntent *state.ChainIntent, chainID common return opcm.DeployOPChainInput{}, fmt.Errorf("error merging proof params from overrides: %w", err) } + // Select which OPCM to use based on dev feature flag + opcmAddr := st.ImplementationsDeployment.OpcmImpl + if devFeatureBitmap, ok := intent.GlobalDeployOverrides["devFeatureBitmap"].(common.Hash); ok { + opcmV2Flag := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000010000") + if isDevFeatureEnabled(devFeatureBitmap, opcmV2Flag) && st.ImplementationsDeployment.OpcmV2Impl != (common.Address{}) { + opcmAddr = st.ImplementationsDeployment.OpcmV2Impl + } + } + return 
opcm.DeployOPChainInput{ OpChainProxyAdminOwner: thisIntent.Roles.L1ProxyAdminOwner, SystemConfigOwner: thisIntent.Roles.SystemConfigOwner, @@ -114,7 +123,7 @@ func makeDCI(intent *state.Intent, thisIntent *state.ChainIntent, chainID common BasefeeScalar: standard.BasefeeScalar, BlobBaseFeeScalar: standard.BlobBaseFeeScalar, L2ChainId: chainID.Big(), - Opcm: st.ImplementationsDeployment.OpcmImpl, + Opcm: opcmAddr, SaltMixer: st.Create2Salt.String(), // passing through salt generated at state initialization GasLimit: thisIntent.GasLimit, DisputeGameType: proofParams.DisputeGameType, @@ -126,6 +135,7 @@ func makeDCI(intent *state.Intent, thisIntent *state.ChainIntent, chainID common AllowCustomDisputeParameters: proofParams.DangerouslyAllowCustomDisputeParameters, OperatorFeeScalar: thisIntent.OperatorFeeScalar, OperatorFeeConstant: thisIntent.OperatorFeeConstant, + SuperchainConfig: st.SuperchainDeployment.SuperchainConfigProxy, UseCustomGasToken: thisIntent.IsCustomGasTokenEnabled(), }, nil } @@ -170,3 +180,14 @@ func shouldDeployOPChain(st *state.State, chainID common.Hash) bool { return true } + +// isDevFeatureEnabled checks if a specific development feature is enabled in a feature bitmap. +// This mirrors the function in devfeatures.go to avoid import cycles. 
+func isDevFeatureEnabled(bitmap, flag common.Hash) bool { + b := new(big.Int).SetBytes(bitmap[:]) + f := new(big.Int).SetBytes(flag[:]) + + featuresIsNonZero := f.Cmp(big.NewInt(0)) != 0 + bitmapContainsFeatures := new(big.Int).And(b, f).Cmp(f) == 0 + return featuresIsNonZero && bitmapContainsFeatures +} diff --git a/op-deployer/pkg/deployer/upgrade/embedded/testdata/config.json b/op-deployer/pkg/deployer/upgrade/embedded/testdata/config.json new file mode 100644 index 0000000000000..07e0c2046f106 --- /dev/null +++ b/op-deployer/pkg/deployer/upgrade/embedded/testdata/config.json @@ -0,0 +1,16 @@ +{ + "prank": "0x1Eb2fFc903729a0F03966B917003800b145F56E2", + "opcm": "0xaf334f4537e87f5155d135392ff6d52f1866465e", + "upgradeInput": { + "systemConfig": "0x034edD2A225f7f429A63E0f1D2084B9E0A93b538", + "disputeGameConfigs": [ + { + "enabled": true, + "initBond": "0x0", + "gameType": 0, + "gameArgs": "0x" + } + ], + "extraInstructions": [] + } +} diff --git a/op-deployer/pkg/deployer/upgrade/embedded/upgrade.go b/op-deployer/pkg/deployer/upgrade/embedded/upgrade.go index 9ae6f07c63d47..49e20d2436c44 100644 --- a/op-deployer/pkg/deployer/upgrade/embedded/upgrade.go +++ b/op-deployer/pkg/deployer/upgrade/embedded/upgrade.go @@ -3,6 +3,7 @@ package embedded import ( "encoding/json" "fmt" + "math/big" "github.com/ethereum-optimism/optimism/op-chain-ops/script" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" @@ -11,34 +12,112 @@ import ( "github.com/lmittmann/w3" ) +// ScriptInput represents the input struct that is actually passed to the script. +// It contains the prank, opcm, and upgrade input. +type ScriptInput struct { + Prank common.Address `evm:"prank"` + Opcm common.Address `evm:"opcm"` + UpgradeInput []byte `evm:"upgradeInput"` +} + +// UpgradeOPChainInput represents the struct that is read from the config file. +// It contains both fields for the old and new upgrade input. 
type UpgradeOPChainInput struct { - Prank common.Address `json:"prank"` - Opcm common.Address `json:"opcm"` - EncodedChainConfigs []OPChainConfig `evm:"-" json:"chainConfigs"` + Prank common.Address `json:"prank"` + Opcm common.Address `json:"opcm"` + ChainConfigs []OPChainConfig `json:"chainConfigs,omitempty"` + UpgradeInputV2 *UpgradeInputV2 `json:"upgradeInput,omitempty"` +} + +// UpgradeInputV2 represents the new upgrade input in OPCM v2. +type UpgradeInputV2 struct { + SystemConfig common.Address `json:"systemConfig"` + DisputeGameConfigs []DisputeGameConfig `json:"disputeGameConfigs"` + ExtraInstructions []ExtraInstruction `json:"extraInstructions"` +} + +// DisputeGameConfig represents the configuration for a dispute game. +type DisputeGameConfig struct { + Enabled bool `json:"enabled"` + InitBond *big.Int `json:"initBond"` + GameType GameType `json:"gameType"` + GameArgs []byte `json:"gameArgs"` } +// ExtraInstruction represents an additional upgrade instruction for the upgrade on OPCM v2. +type ExtraInstruction struct { + Key string `json:"key"` + Data []byte `json:"data"` +} + +// GameType represents the type of dispute game. +type GameType uint32 + +const ( + GameTypeCannon GameType = 0 + GameTypePermissionedCannon GameType = 1 + GameTypeCannonKona GameType = 8 +) + +// OPChainConfig represents the configuration for an OP Chain upgrade on OPCM v1. 
type OPChainConfig struct { SystemConfigProxy common.Address `json:"systemConfigProxy"` CannonPrestate common.Hash `json:"cannonPrestate"` CannonKonaPrestate common.Hash `json:"cannonKonaPrestate"` } +var upgradeInputEncoder = w3.MustNewFunc("dummy((address systemConfig,(bool enabled,uint256 initBond,uint32 gameType,bytes gameArgs)[] disputeGameConfigs,(string key,bytes data)[] extraInstructions))", + "") + var opChainConfigEncoder = w3.MustNewFunc("dummy((address systemConfigProxy,bytes32 cannonPrestate,bytes32 cannonKonaPrestate)[])", "") -func (u *UpgradeOPChainInput) OpChainConfigs() ([]byte, error) { - data, err := opChainConfigEncoder.EncodeArgs(u.EncodedChainConfigs) +func (u *UpgradeOPChainInput) EncodedOpChainConfigs() ([]byte, error) { + data, err := opChainConfigEncoder.EncodeArgs(u.ChainConfigs) if err != nil { return nil, fmt.Errorf("failed to encode chain configs: %w", err) } return data[4:], nil } +func (u *UpgradeOPChainInput) EncodedUpgradeInputV2() ([]byte, error) { + data, err := upgradeInputEncoder.EncodeArgs(u.UpgradeInputV2) + if err != nil { + return nil, fmt.Errorf("failed to encode upgrade input: %w", err) + } + + return data[4:], nil +} + type UpgradeOPChain struct { Run func(input common.Address) } func Upgrade(host *script.Host, input UpgradeOPChainInput) error { - return opcm.RunScriptVoid(host, input, "UpgradeOPChain.s.sol", "UpgradeOPChain") + // Determine which input format to use and encode it + var encodedUpgradeInput []byte + var encodedError error + + if input.UpgradeInputV2 != nil { + // Prefer V2 input if present + encodedUpgradeInput, encodedError = input.EncodedUpgradeInputV2() + } else if len(input.ChainConfigs) > 0 { + // Fall back to V1 input if V2 is not present + encodedUpgradeInput, encodedError = input.EncodedOpChainConfigs() + } else { + // Neither input format is present + return fmt.Errorf("failed to read either an upgrade input or config array") + } + + if encodedError != nil { + return encodedError + } + + 
scriptInput := ScriptInput{ + Prank: input.Prank, + Opcm: input.Opcm, + UpgradeInput: encodedUpgradeInput, + } + return opcm.RunScriptVoid[ScriptInput](host, scriptInput, "UpgradeOPChain.s.sol", "UpgradeOPChain") } type Upgrader struct{} diff --git a/op-deployer/pkg/deployer/upgrade/embedded/upgrade_superchainconfig.go b/op-deployer/pkg/deployer/upgrade/embedded/upgrade_superchainconfig.go index a1adb77cbb224..976534fe23576 100644 --- a/op-deployer/pkg/deployer/upgrade/embedded/upgrade_superchainconfig.go +++ b/op-deployer/pkg/deployer/upgrade/embedded/upgrade_superchainconfig.go @@ -9,9 +9,10 @@ import ( ) type UpgradeSuperchainConfigInput struct { - Prank common.Address `json:"prank"` - Opcm common.Address `json:"opcm"` - SuperchainConfig common.Address `json:"superchainConfig"` + Prank common.Address `json:"prank"` + Opcm common.Address `json:"opcm"` + SuperchainConfig common.Address `json:"superchainConfig"` + ExtraInstructions []ExtraInstruction `json:"extraInstructions,omitempty"` } type UpgradeSuperchainConfigScript script.DeployScriptWithoutOutput[UpgradeSuperchainConfigInput] diff --git a/op-deployer/pkg/deployer/upgrade/embedded/upgrade_test.go b/op-deployer/pkg/deployer/upgrade/embedded/upgrade_test.go new file mode 100644 index 0000000000000..c2db9c24269cd --- /dev/null +++ b/op-deployer/pkg/deployer/upgrade/embedded/upgrade_test.go @@ -0,0 +1,208 @@ +package embedded + +import ( + "encoding/hex" + "encoding/json" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestUpgradeOPChainInput_UpgradeInputV2(t *testing.T) { + input := &UpgradeOPChainInput{ + Prank: common.Address{0xaa}, + Opcm: common.Address{0xbb}, + UpgradeInputV2: &UpgradeInputV2{ + SystemConfig: common.Address{0x01}, + DisputeGameConfigs: []DisputeGameConfig{ + { + Enabled: true, + InitBond: big.NewInt(1000), + GameType: GameTypeCannon, + GameArgs: []byte{0x01, 0x02, 0x03}, + }, + }, + ExtraInstructions: 
[]ExtraInstruction{ + { + Key: "test-key", + Data: []byte{0x04, 0x05, 0x06}, + }, + }, + }, + } + data, err := input.EncodedUpgradeInputV2() + + require.NoError(t, err) + require.NotEmpty(t, data) + + expected := "0000000000000000000000000000000000000000000000000000000000000020" + // offset to tuple + "0000000000000000000000000100000000000000000000000000000000000000" + // systemConfig + "0000000000000000000000000000000000000000000000000000000000000060" + // offset to disputeGameConfigs + "0000000000000000000000000000000000000000000000000000000000000160" + // offset to extraInstructions + "0000000000000000000000000000000000000000000000000000000000000001" + // disputeGameConfigs.length + "0000000000000000000000000000000000000000000000000000000000000020" + // offset to disputeGameConfigs[0] + "0000000000000000000000000000000000000000000000000000000000000001" + // disputeGameConfigs[0].enabled + "00000000000000000000000000000000000000000000000000000000000003e8" + // disputeGameConfigs[0].initBond (1000) + "0000000000000000000000000000000000000000000000000000000000000000" + // disputeGameConfigs[0].gameType + "0000000000000000000000000000000000000000000000000000000000000080" + // offset to gameArgs + "0000000000000000000000000000000000000000000000000000000000000003" + // gameArgs.length + "0102030000000000000000000000000000000000000000000000000000000000" + // gameArgs data + "0000000000000000000000000000000000000000000000000000000000000001" + // extraInstructions.length + "0000000000000000000000000000000000000000000000000000000000000020" + // offset to extraInstructions[0] + "0000000000000000000000000000000000000000000000000000000000000040" + // offset to key + "0000000000000000000000000000000000000000000000000000000000000080" + // offset to data + "0000000000000000000000000000000000000000000000000000000000000008" + // key.length + "746573742d6b65790000000000000000000000000000000000000000000000" + // "test-key" + "00" + // padding + 
"0000000000000000000000000000000000000000000000000000000000000003" + // data.length + "0405060000000000000000000000000000000000000000000000000000000000" // data + + require.Equal(t, expected, hex.EncodeToString(data)) +} + +func TestUpgradeOPChainInput_OpChainConfigs(t *testing.T) { + input := &UpgradeOPChainInput{ + Prank: common.Address{0xaa}, + Opcm: common.Address{0xbb}, + ChainConfigs: []OPChainConfig{ + { + SystemConfigProxy: common.Address{0x01}, + CannonPrestate: common.Hash{0xaa}, + CannonKonaPrestate: common.Hash{0xbb}, + }, + }, + } + data, err := input.EncodedOpChainConfigs() + + require.NoError(t, err) + require.NotEmpty(t, data) + + expected := "0000000000000000000000000000000000000000000000000000000000000020" + // offset to array + "0000000000000000000000000000000000000000000000000000000000000001" + // array.length + "0000000000000000000000000100000000000000000000000000000000000000" + // systemConfigProxy + "aa00000000000000000000000000000000000000000000000000000000000000" + // cannonPrestate + "bb00000000000000000000000000000000000000000000000000000000000000" // cannonKonaPrestate + + require.Equal(t, expected, hex.EncodeToString(data)) +} + +func TestUpgrader_ValidationErrors(t *testing.T) { + tests := []struct { + name string + input UpgradeOPChainInput + errorContains string + }{ + { + name: "neither input provided - validation fails", + input: UpgradeOPChainInput{ + Prank: common.Address{0xaa}, + Opcm: common.Address{0xbb}, + }, + errorContains: "failed to read either an upgrade input or config array", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + upgrader := DefaultUpgrader + + // Convert input to JSON to test the Upgrader.Upgrade method + inputJSON, err := json.Marshal(tt.input) + require.NoError(t, err) + + // Call Upgrade with nil host - validation should fail before script execution + err = upgrader.Upgrade(nil, inputJSON) + + require.Error(t, err) + require.Contains(t, err.Error(), tt.errorContains) + }) + 
} +} + +func TestUpgrader_ValidationPasses(t *testing.T) { + tests := []struct { + name string + input UpgradeOPChainInput + description string + }{ + { + name: "V2 input provided", + input: UpgradeOPChainInput{ + Prank: common.Address{0xaa}, + Opcm: common.Address{0xbb}, + UpgradeInputV2: &UpgradeInputV2{ + SystemConfig: common.Address{0x01}, + DisputeGameConfigs: []DisputeGameConfig{ + { + Enabled: true, + InitBond: big.NewInt(1000), + GameType: GameTypeCannon, + GameArgs: []byte{0x01, 0x02}, + }, + }, + }, + }, + description: "Validation should pass when V2 input is provided and ShouldAllowV1 is false", + }, + { + name: "only V1 input provided", + input: UpgradeOPChainInput{ + Prank: common.Address{0xaa}, + Opcm: common.Address{0xbb}, + ChainConfigs: []OPChainConfig{ + { + SystemConfigProxy: common.Address{0x01}, + CannonPrestate: common.Hash{0xaa}, + CannonKonaPrestate: common.Hash{0xbb}, + }, + }, + }, + description: "Validation should pass when V1 input is provided", + }, + { + name: "both inputs provided", + input: UpgradeOPChainInput{ + Prank: common.Address{0xaa}, + Opcm: common.Address{0xbb}, + UpgradeInputV2: &UpgradeInputV2{ + SystemConfig: common.Address{0x01}, + DisputeGameConfigs: []DisputeGameConfig{ + { + Enabled: true, + InitBond: big.NewInt(1000), + GameType: GameTypeCannon, + GameArgs: []byte{0x01, 0x02}, + }, + }, + }, + ChainConfigs: []OPChainConfig{ + { + SystemConfigProxy: common.Address{0x02}, + CannonPrestate: common.Hash{0xcc}, + CannonKonaPrestate: common.Hash{0xdd}, + }, + }, + }, + description: "Validation should pass when both inputs are provided and ShouldAllowV1 is true (should prefer V2)", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Verify that encoding works (validation passes) + // We test the encoding separately since we can't test the full Upgrade flow without a script host + upgradeInput := tt.input + + // Test that the correct encoding path would be chosen + if 
upgradeInput.UpgradeInputV2 != nil { + _, err := upgradeInput.EncodedUpgradeInputV2() + require.NoError(t, err, "V2 encoding should succeed when V2 input is present") + } else if len(upgradeInput.ChainConfigs) > 0 { + _, err := upgradeInput.EncodedOpChainConfigs() + require.NoError(t, err, "V1 encoding should succeed when V1 input is present") + } + }) + } +} diff --git a/op-deployer/pkg/deployer/verify/artifacts.go b/op-deployer/pkg/deployer/verify/artifacts.go index 7c598c8efda91..4bb10d5cd0682 100644 --- a/op-deployer/pkg/deployer/verify/artifacts.go +++ b/op-deployer/pkg/deployer/verify/artifacts.go @@ -43,6 +43,7 @@ var contractNameExceptions = map[string]string{ "OpcmUpgrader": "OPContractsManager.sol/OPContractsManagerUpgrader.json", "OpcmInteropMigrator": "OPContractsManager.sol/OPContractsManagerInteropMigrator.json", "OpcmStandardValidator": "OPContractsManagerStandardValidator.sol/OPContractsManagerStandardValidator.json", + "OpcmMigrator": "OPContractsManagerMigrator.sol/OPContractsManagerMigrator.json", "OpcmV2": "OPContractsManagerV2.sol/OPContractsManagerV2.json", "OpcmContainer": "OPContractsManagerContainer.sol/OPContractsManagerContainer.json", "OpcmUtils": "OPContractsManagerUtils.sol/OPContractsManagerUtils.json", diff --git a/op-devstack/dsl/l2_el.go b/op-devstack/dsl/l2_el.go index b226386aeed91..d1d802866407d 100644 --- a/op-devstack/dsl/l2_el.go +++ b/op-devstack/dsl/l2_el.go @@ -244,7 +244,7 @@ func (el *L2ELNode) Start() { } func (el *L2ELNode) PeerWith(peer *L2ELNode) { - sysgo.ConnectP2P(el.ctx, el.require, el.inner.L2EthClient().RPC(), peer.inner.L2EthClient().RPC()) + sysgo.ConnectP2P(el.ctx, el.require, el.inner.L2EthClient().RPC(), peer.inner.L2EthClient().RPC(), false) } func (el *L2ELNode) DisconnectPeerWith(peer *L2ELNode) { diff --git a/op-devstack/sysgo/el_node_identity.go b/op-devstack/sysgo/el_node_identity.go index bd21dfa48d85f..b4d265dd85cd6 100644 --- a/op-devstack/sysgo/el_node_identity.go +++ 
b/op-devstack/sysgo/el_node_identity.go @@ -3,42 +3,23 @@ package sysgo import ( "crypto/ecdsa" "encoding/hex" - "net" - "strconv" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/p2p/enode" ) type ELNodeIdentity struct { - Key *ecdsa.PrivateKey - Port int - Enode string + Key *ecdsa.PrivateKey + Port int } -func NewELNodeIdentity(addr string, port int) *ELNodeIdentity { +func NewELNodeIdentity(port int) *ELNodeIdentity { key, err := crypto.GenerateKey() if err != nil { panic(err) } - if port <= 0 { - portStr, err := getAvailableLocalPort() - if err != nil { - panic(err) - } - port, err = strconv.Atoi(portStr) - if err != nil { - panic(err) - } - } - ip := net.ParseIP(addr) - if ip == nil { - panic("invalid ip for ELNodeIdentity: " + addr) - } return &ELNodeIdentity{ - Key: key, - Port: port, - Enode: enode.NewV4(&key.PublicKey, ip, port, port).String(), + Key: key, + Port: port, } } diff --git a/op-devstack/sysgo/l2_cl.go b/op-devstack/sysgo/l2_cl.go index c6da8e5f143e5..addda4f04721f 100644 --- a/op-devstack/sysgo/l2_cl.go +++ b/op-devstack/sysgo/l2_cl.go @@ -120,7 +120,7 @@ func WithL2CLNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack func WithL2CLNodeFollowL2(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, l2FollowSourceID stack.L2CLNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { switch os.Getenv("DEVSTACK_L2CL_KIND") { case "kona": - panic("kona does not support following") + return WithKonaNodeFollowL2(l2CLID, l1CLID, l1ELID, l2ELID, l2FollowSourceID, opts...) 
case "supernode": panic("supernode does not support following") default: diff --git a/op-devstack/sysgo/l2_cl_kona.go b/op-devstack/sysgo/l2_cl_kona.go index dce2891ed6e03..2408279248662 100644 --- a/op-devstack/sysgo/l2_cl_kona.go +++ b/op-devstack/sysgo/l2_cl_kona.go @@ -161,8 +161,25 @@ func (k *KonaNode) InteropRPC() (endpoint string, jwtSecret eth.Bytes32) { var _ L2CLNode = (*KonaNode)(nil) -func WithKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { +func WithKonaNodeFollowL2(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, l2FollowSourceID stack.L2CLNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { + followSource := func(orch *Orchestrator) string { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) + l2CLFollowSource, ok := orch.l2CLs.Get(l2FollowSourceID) + p.Require().True(ok, "l2 CL Follow Source required") + return l2CLFollowSource.UserRPC() + }(orch) + opts = append(opts, L2CLFollowSource(followSource)) + withKonaNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...)(orch) + }) +} + +func WithKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { + return stack.AfterDeploy(withKonaNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...)) +} + +func withKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) func(orch *Orchestrator) { + return func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) require := p.Require() @@ -236,6 +253,12 @@ func WithKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack envVars = append(envVars, "KONA_METRICS_ENABLED=true") } + if cfg.FollowSource != "" { + envVars = 
append(envVars, + "KONA_NODE_L2_FOLLOW_SOURCE="+cfg.FollowSource, + ) + } + if cfg.IsSequencer { p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2CLID.ChainID().ToBig())) require.NoError(err, "need p2p key for sequencer") @@ -279,5 +302,5 @@ func WithKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack p.Cleanup(k.Stop) p.Logger().Info("Kona-node is up", "rpc", k.UserRPC()) require.True(orch.l2CLs.SetIfMissing(l2CLID, k), "must not already exist") - }) + } } diff --git a/op-devstack/sysgo/l2_el_opreth.go b/op-devstack/sysgo/l2_el_opreth.go index 3377ba09209ac..7a74d62f37a42 100644 --- a/op-devstack/sysgo/l2_el_opreth.go +++ b/op-devstack/sysgo/l2_el_opreth.go @@ -265,12 +265,9 @@ func WithOpReth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra } if areMetricsEnabled() { - // NB: Instead of getAvailableLocalPort, we should pass "0" so the OS picks its - // own port, but that is not currently logged properly so we cannot parse it. - // See: https://github.com/op-rs/op-reth/issues/333 - metricsPort, err := getAvailableLocalPort() - p.Require().NoError(err, "WithOpReth: getting metrics port") - args = append(args, "--metrics="+metricsPort) + // Use port 0 to let the OS assign a port atomically at bind time. + // The actual port will be discovered by parsing the process logs. 
+ args = append(args, "--metrics=127.0.0.1:0") } if supervisorRPC != "" { diff --git a/op-devstack/sysgo/l2_el_p2p_util.go b/op-devstack/sysgo/l2_el_p2p_util.go index a97633d069ff1..e69662d98ac77 100644 --- a/op-devstack/sysgo/l2_el_p2p_util.go +++ b/op-devstack/sysgo/l2_el_p2p_util.go @@ -3,9 +3,11 @@ package sysgo import ( "context" "slices" + "strings" "time" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" @@ -13,13 +15,13 @@ import ( "github.com/ethereum-optimism/optimism/op-service/testreq" ) -func WithL2ELP2PConnection(l2EL1ID, l2EL2ID stack.L2ELNodeID) stack.Option[*Orchestrator] { +func WithL2ELP2PConnection(l2EL1ID, l2EL2ID stack.L2ELNodeID, trusted bool) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { require := orch.P().Require() - l2EL1, ok := orch.l2ELs.Get(l2EL1ID) + l2EL1, ok := orch.GetL2EL(l2EL1ID) require.True(ok, "looking for L2 EL node 1 to connect p2p") - l2EL2, ok := orch.l2ELs.Get(l2EL2ID) + l2EL2, ok := orch.GetL2EL(l2EL2ID) require.True(ok, "looking for L2 EL node 2 to connect p2p") require.Equal(l2EL1ID.ChainID(), l2EL2ID.ChainID(), "must be same l2 chain") @@ -33,7 +35,7 @@ func WithL2ELP2PConnection(l2EL1ID, l2EL2ID stack.L2ELNodeID) stack.Option[*Orch require.NoError(err, "failed to connect to el2 rpc") defer rpc2.Close() - ConnectP2P(orch.P().Ctx(), require, rpc1, rpc2) + ConnectP2P(orch.P().Ctx(), require, rpc1, rpc2, trusted) }) } @@ -42,23 +44,36 @@ type RpcCaller interface { } // ConnectP2P creates a p2p peer connection between node1 and node2. 
-func ConnectP2P(ctx context.Context, require *testreq.Assertions, initiator RpcCaller, acceptor RpcCaller) { +func ConnectP2P(ctx context.Context, require *testreq.Assertions, initiator RpcCaller, acceptor RpcCaller, trusted bool) { var targetInfo p2p.NodeInfo require.NoError(acceptor.CallContext(ctx, &targetInfo, "admin_nodeInfo"), "get node info") + targetNode, err := enode.ParseV4(targetInfo.Enode) + require.NoError(err, "failed to parse target node") + expectedID := targetNode.ID().String() + + var initiatorInfo p2p.NodeInfo + require.NoError(initiator.CallContext(ctx, &initiatorInfo, "admin_nodeInfo"), "get initiator node info") var peerAdded bool require.NoError(initiator.CallContext(ctx, &peerAdded, "admin_addPeer", targetInfo.Enode), "add peer") require.True(peerAdded, "should have added peer successfully") + if trusted { + var peerAddedTrusted bool + require.NoError(initiator.CallContext(ctx, &peerAddedTrusted, "admin_addTrustedPeer", targetInfo.Enode), "add trusted peer") + require.True(peerAddedTrusted, "should have added trusted peer successfully") + } + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - err := wait.For(ctx, time.Second, func() (bool, error) { + err = wait.For(ctx, time.Second, func() (bool, error) { var peers []peer if err := initiator.CallContext(ctx, &peers, "admin_peers"); err != nil { return false, err } return slices.ContainsFunc(peers, func(p peer) bool { - return p.ID == targetInfo.ID + peerID := strings.TrimPrefix(strings.ToLower(p.ID), "0x") + return peerID == strings.ToLower(expectedID) }), nil }) require.NoError(err, "The peer was not connected") @@ -68,6 +83,9 @@ func ConnectP2P(ctx context.Context, require *testreq.Assertions, initiator RpcC func DisconnectP2P(ctx context.Context, require *testreq.Assertions, initiator RpcCaller, acceptor RpcCaller) { var targetInfo p2p.NodeInfo require.NoError(acceptor.CallContext(ctx, &targetInfo, "admin_nodeInfo"), "get node info") + targetNode, 
err := enode.ParseV4(targetInfo.Enode) + require.NoError(err, "failed to parse target node") + expectedID := targetNode.ID().String() var peerRemoved bool require.NoError(initiator.CallContext(ctx, &peerRemoved, "admin_removePeer", targetInfo.ENR), "add peer") @@ -75,13 +93,14 @@ func DisconnectP2P(ctx context.Context, require *testreq.Assertions, initiator R ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - err := wait.For(ctx, time.Second, func() (bool, error) { + err = wait.For(ctx, time.Second, func() (bool, error) { var peers []peer if err := initiator.CallContext(ctx, &peers, "admin_peers"); err != nil { return false, err } return !slices.ContainsFunc(peers, func(p peer) bool { - return p.ID == targetInfo.ID + peerID := strings.TrimPrefix(strings.ToLower(p.ID), "0x") + return peerID == strings.ToLower(expectedID) }), nil }) require.NoError(err, "The peer was not removed") diff --git a/op-devstack/sysgo/op_rbuilder.go b/op-devstack/sysgo/op_rbuilder.go index 93d4235ffa907..677567aa473f0 100644 --- a/op-devstack/sysgo/op_rbuilder.go +++ b/op-devstack/sysgo/op_rbuilder.go @@ -4,13 +4,11 @@ package sysgo import ( "encoding/hex" "encoding/json" - "net" "os" "path/filepath" "strconv" "strings" "sync" - "time" "github.com/ethereum/go-ethereum/log" @@ -20,6 +18,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/logpipe" + "github.com/ethereum-optimism/optimism/op-service/tasks" "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" ) @@ -142,16 +141,16 @@ func (cfg *OPRBuilderNodeConfig) LaunchSpec(p devtest.P) (args []string, env []s if cfg.FlashblocksAddr == "" { cfg.FlashblocksAddr = "127.0.0.1" } - if cfg.FlashblocksPort <= 0 { - portStr, err := getAvailableLocalPort() - p.Require().NoError(err, "allocate flashblocks port") - portVal, err := strconv.Atoi(portStr) - 
p.Require().NoError(err, "parse flashblocks port") - cfg.FlashblocksPort = portVal - } - fbPortStr := strconv.Itoa(cfg.FlashblocksPort) args = append(args, "--flashblocks.enabled") - args = append(args, "--flashblocks.addr="+cfg.FlashblocksAddr, "--flashblocks.port="+fbPortStr) + args = append(args, "--flashblocks.addr="+cfg.FlashblocksAddr) + if cfg.FlashblocksPort > 0 { + // Use explicitly configured port + args = append(args, "--flashblocks.port="+strconv.Itoa(cfg.FlashblocksPort)) + } else { + // Use port 0 to let the OS assign a port atomically at bind time. + // The actual port will be discovered by parsing the process logs. + args = append(args, "--flashblocks.port=0") + } } // P2P configuration: enforce deterministic identity and static peering to the sequencer EL. @@ -176,30 +175,28 @@ func (cfg *OPRBuilderNodeConfig) LaunchSpec(p devtest.P) (args []string, env []s if cfg.EnableRPC { args = append(args, "--http") args = append(args, "--http.addr="+cfg.RPCAddr) - if cfg.RPCPort <= 0 { - portStr, err := getAvailableLocalPort() - p.Require().NoError(err, "allocate rpc port") - portVal, err := strconv.Atoi(portStr) - p.Require().NoError(err, "parse rpc port") - cfg.RPCPort = portVal + if cfg.RPCPort > 0 { + // Use explicitly configured port + args = append(args, "--http.port="+strconv.Itoa(cfg.RPCPort)) + } else { + // Use port 0 to let the OS assign a port atomically at bind time. + // The actual port will be discovered by parsing the process logs. 
+ args = append(args, "--http.port=0") } - rpcPortStr := strconv.Itoa(cfg.RPCPort) - args = append(args, "--http.port="+rpcPortStr) args = append(args, "--http.api="+cfg.RPCAPI) - } if cfg.AuthRPCAddr != "" { args = append(args, "--authrpc.addr="+cfg.AuthRPCAddr) } - if cfg.AuthRPCPort <= 0 { - portStr, err := getAvailableLocalPort() - p.Require().NoError(err, "allocate auth rpc port") - portVal, err := strconv.Atoi(portStr) - p.Require().NoError(err, "parse auth rpc port") - cfg.AuthRPCPort = portVal + if cfg.AuthRPCPort > 0 { + // Use explicitly configured port + args = append(args, "--authrpc.port="+strconv.Itoa(cfg.AuthRPCPort)) + } else { + // Use port 0 to let the OS assign a port atomically at bind time. + // The actual port will be discovered by parsing the process logs. + args = append(args, "--authrpc.port=0") } - args = append(args, "--authrpc.port="+strconv.Itoa(cfg.AuthRPCPort)) if cfg.AuthRPCJWTPath != "" { args = append(args, "--authrpc.jwtsecret="+cfg.AuthRPCJWTPath) } @@ -217,22 +214,16 @@ func (cfg *OPRBuilderNodeConfig) LaunchSpec(p devtest.P) (args []string, env []s if cfg.Chain != "" { args = append(args, "--chain="+cfg.Chain) } - if cfg.WithUnusedPorts { - args = append(args, "--with-unused-ports") - } if cfg.DisableDiscovery { args = append(args, "--disable-discovery") } - if !cfg.WithUnusedPorts { - if cfg.P2PPort <= 0 { - portStr, err := getAvailableLocalPort() - p.Require().NoError(err, "allocate p2p port") - portVal, err := strconv.Atoi(portStr) - p.Require().NoError(err, "parse p2p port") - cfg.P2PPort = portVal - } + if cfg.P2PPort > 0 { + // Use explicitly configured P2P port args = append(args, "--port="+strconv.Itoa(cfg.P2PPort)) + } else { + // Use --with-unused-ports to let reth assign P2P port atomically at bind time. 
+ args = append(args, "--with-unused-ports") } if cfg.DataDir == "" { @@ -365,12 +356,60 @@ func (b *OPRBuilderNode) Start() { args, env := cfg.LaunchSpec(b.p) - // Forward structured logs to Go logger + // Create channels for discovering ports from process logs. + // When using port 0, the OS assigns ports at bind time and the process logs them. + flashblocksWSChan := make(chan string, 1) + httpRPCChan := make(chan string, 1) + authRPCChan := make(chan string, 1) + defer close(flashblocksWSChan) + defer close(httpRPCChan) + defer close(authRPCChan) + + // Forward structured logs to Go logger and parse for port discovery logOut := logpipe.ToLogger(b.logger.New("component", "op-OPRbuilderNode", "src", "stdout")) logErr := logpipe.ToLogger(b.logger.New("component", "op-OPRbuilderNode", "src", "stderr")) + // Log parsing callback to extract bound addresses from process output + onLogEntry := func(e logpipe.LogEntry) { + msg := e.LogMessage() + // Flashblocks WS - custom log message from wspub.rs + if strings.HasPrefix(msg, "Flashblocks WebSocketPublisher listening on ") { + addr := strings.TrimPrefix(msg, "Flashblocks WebSocketPublisher listening on ") + if validURL := parseAndValidateAddr(addr, "ws"); validURL != "" { + select { + case flashblocksWSChan <- validURL: + default: + } + } + } + // HTTP RPC - standard reth log message + if msg == "RPC HTTP server started" { + if addr, ok := e.FieldValue("url").(string); ok { + if validURL := parseAndValidateAddr(addr, "http"); validURL != "" { + select { + case httpRPCChan <- validURL: + default: + } + } + } + } + // Auth RPC - standard reth log message + if msg == "RPC auth server started" { + if addr, ok := e.FieldValue("url").(string); ok { + if validURL := parseAndValidateAddr(addr, "http"); validURL != "" { + select { + case authRPCChan <- validURL: + default: + } + } + } + } + } + stdOut := logpipe.LogCallback(func(line []byte) { - logOut(logpipe.ParseRustStructuredLogs(line)) + e := 
logpipe.ParseRustStructuredLogs(line) + logOut(e) + onLogEntry(e) }) stdErr := logpipe.LogCallback(func(line []byte) { logErr(logpipe.ParseRustStructuredLogs(line)) @@ -389,33 +428,26 @@ func (b *OPRBuilderNode) Start() { err = b.sub.Start(execPath, args, env) b.p.Require().NoError(err, "start OPRBuilderNode") - const readinessTimeout = 15 * time.Second - + // Wait for ports to be discovered from logs, then configure proxies if cfg.EnableRPC { - rpcUpstreamHostport := net.JoinHostPort(cfg.RPCAddr, strconv.Itoa(cfg.RPCPort)) - rpcUpstreamURL := "http://" + rpcUpstreamHostport - waitTCPReady(b.p, rpcUpstreamURL, readinessTimeout) - b.logger.Info("OPRBuilderNode upstream RPC ready", "rpc", rpcUpstreamURL) - b.rpcProxy.SetUpstream(ProxyAddr(b.p.Require(), rpcUpstreamURL)) - waitTCPReady(b.p, b.rpcProxyURL, readinessTimeout) + var httpRPCAddr string + b.p.Require().NoError(tasks.Await(b.p.Ctx(), httpRPCChan, &httpRPCAddr), "need HTTP RPC address from logs") + b.logger.Info("OPRBuilderNode upstream RPC ready", "rpc", httpRPCAddr) + b.rpcProxy.SetUpstream(ProxyAddr(b.p.Require(), httpRPCAddr)) b.logger.Info("OPRBuilderNode proxy RPC ready", "proxy_rpc", b.rpcProxyURL) - authUpstreamHostport := net.JoinHostPort(cfg.RPCAddr, strconv.Itoa(cfg.AuthRPCPort)) - authUpstreamURL := "http://" + authUpstreamHostport - waitTCPReady(b.p, authUpstreamURL, readinessTimeout) - b.logger.Info("OPRBuilderNode upstream auth RPC ready", "auth_rpc", authUpstreamURL) - b.authProxy.SetUpstream(ProxyAddr(b.p.Require(), authUpstreamURL)) - waitTCPReady(b.p, b.authProxyURL, readinessTimeout) + var authRPCAddr string + b.p.Require().NoError(tasks.Await(b.p.Ctx(), authRPCChan, &authRPCAddr), "need Auth RPC address from logs") + b.logger.Info("OPRBuilderNode upstream auth RPC ready", "auth_rpc", authRPCAddr) + b.authProxy.SetUpstream(ProxyAddr(b.p.Require(), authRPCAddr)) b.logger.Info("OPRBuilderNode proxy auth RPC ready", "proxy_auth_rpc", b.authProxyURL) } if cfg.EnableFlashblocks { - 
wsUpstreamHostport := net.JoinHostPort(cfg.FlashblocksAddr, strconv.Itoa(cfg.FlashblocksPort)) - wsUpstreamURL := "ws://" + wsUpstreamHostport - waitWSReady(b.p, wsUpstreamURL, readinessTimeout) - b.logger.Info("OPRBuilderNode upstream WS ready", "ws", wsUpstreamURL) - b.wsProxy.SetUpstream(ProxyAddr(b.p.Require(), wsUpstreamURL)) - waitWSReady(b.p, b.wsProxyURL, readinessTimeout) + var flashblocksAddr string + b.p.Require().NoError(tasks.Await(b.p.Ctx(), flashblocksWSChan, &flashblocksAddr), "need Flashblocks WS address from logs") + b.logger.Info("OPRBuilderNode upstream WS ready", "ws", flashblocksAddr) + b.wsProxy.SetUpstream(ProxyAddr(b.p.Require(), flashblocksAddr)) b.logger.Info("OPRBuilderNode proxy WS ready", "proxy_ws", b.wsProxyURL) } } diff --git a/op-devstack/sysgo/rollup_boost.go b/op-devstack/sysgo/rollup_boost.go index f7bffc40273df..bbbee9a1f4c29 100644 --- a/op-devstack/sysgo/rollup_boost.go +++ b/op-devstack/sysgo/rollup_boost.go @@ -1,7 +1,6 @@ package sysgo import ( - "net" "net/http" "strconv" "strings" @@ -15,6 +14,7 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/logpipe" + "github.com/ethereum-optimism/optimism/op-service/tasks" "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" ) @@ -82,8 +82,6 @@ func (r *RollupBoostNode) Start() { cfg := r.cfg r.p.Require().NotNil(cfg, "rollup-boost config not initialized") - args, env := cfg.LaunchSpec(r.p) - if r.wsProxy == nil { r.wsProxy = tcpproxy.New(r.p.Logger()) r.p.Require().NoError(r.wsProxy.Start()) @@ -98,12 +96,34 @@ func (r *RollupBoostNode) Start() { r.p.Cleanup(func() { r.rpcProxy.Close() }) } + args, env := cfg.LaunchSpec(r.p) + + // Create channel for discovering flashblocks WS port from process logs. + // When using port 0, the OS assigns the port at bind time and the process logs it. 
+ flashblocksWSChan := make(chan string, 1) + defer close(flashblocksWSChan) + // Parse Rust-structured logs and forward into Go logger with attributes logOut := logpipe.ToLogger(r.logger.New("stream", "stdout")) logErr := logpipe.ToLogger(r.logger.New("stream", "stderr")) + // Log parsing callback to extract bound addresses from process output + onLogEntry := func(e logpipe.LogEntry) { + msg := e.LogMessage() + // Flashblocks WS - custom log message from outbound.rs + if strings.HasPrefix(msg, "Flashblocks WebSocketPublisher listening on ") { + addr := strings.TrimPrefix(msg, "Flashblocks WebSocketPublisher listening on ") + select { + case flashblocksWSChan <- "ws://" + addr: + default: + } + } + } + stdOut := logpipe.LogCallback(func(line []byte) { - logOut(logpipe.ParseRustStructuredLogs(line)) + e := logpipe.ParseRustStructuredLogs(line) + logOut(e) + onLogEntry(e) }) stdErr := logpipe.LogCallback(func(line []byte) { logErr(logpipe.ParseRustStructuredLogs(line)) @@ -122,6 +142,9 @@ func (r *RollupBoostNode) Start() { err = r.sub.Start(execPath, args, env) r.p.Require().NoError(err, "start rollup-boost") + // RPC port: still uses pre-allocation because rollup-boost doesn't log the actual + // bound RPC address when using port 0. This requires a Rust change to fix. + // TODO: Update rollup-boost to log "RPC server listening on {addr}" and parse it here. 
rpcUpstreamURL := "http://" + cfg.RPCHost + ":" + strconv.Itoa(int(cfg.RPCPort)) waitTCPReady(r.p, rpcUpstreamURL, 5*time.Second) r.logger.Info("rollup-boost upstream RPC ready", "rpc", rpcUpstreamURL) @@ -129,17 +152,13 @@ func (r *RollupBoostNode) Start() { waitTCPReady(r.p, r.rpcProxyURL, 10*time.Second) r.logger.Info("rollup-boost proxy RPC ready", "proxy_rpc", r.rpcProxyURL) - // WS: wait for upstream first, then configure and test proxy + // Flashblocks WS: discover port from logs, then configure proxy if cfg.EnableFlashblocks { - wsUpstreamHostport := net.JoinHostPort(cfg.FlashblocksHost, strconv.Itoa(cfg.FlashblocksPort)) - wsUpstreamURL := "ws://" + wsUpstreamHostport + var flashblocksAddr string + r.p.Require().NoError(tasks.Await(r.p.Ctx(), flashblocksWSChan, &flashblocksAddr), "need Flashblocks WS address from logs") + r.logger.Info("rollup-boost upstream WS ready", "upstream_ws", flashblocksAddr) - // Wait for upstream WS TCP endpoint - waitTCPReady(r.p, wsUpstreamURL, 5*time.Second) - r.logger.Info("rollup-boost upstream WS ready", "upstream_ws", wsUpstreamURL) - - r.wsProxy.SetUpstream(ProxyAddr(r.p.Require(), wsUpstreamURL)) - waitWSReady(r.p, r.wsProxyURL, 10*time.Second) + r.wsProxy.SetUpstream(ProxyAddr(r.p.Require(), flashblocksAddr)) r.logger.Info("rollup-boost proxy WS ready", "proxy_ws", r.wsProxyURL) } } @@ -272,15 +291,15 @@ func (cfg *RollupBoostConfig) LaunchSpec(p devtest.P) (args []string, env []stri if cfg.FlashblocksHost == "" { cfg.FlashblocksHost = "127.0.0.1" } - if cfg.FlashblocksPort <= 0 { - portStr, err := getAvailableLocalPort() - p.Require().NoError(err, "allocate flashblocks port") - portVal, err := strconv.Atoi(portStr) - p.Require().NoError(err, "parse flashblocks port") - cfg.FlashblocksPort = portVal + args = append(args, "--flashblocks", "--flashblocks-host="+cfg.FlashblocksHost) + if cfg.FlashblocksPort > 0 { + // Use explicitly configured port + args = append(args, 
"--flashblocks-port="+strconv.Itoa(cfg.FlashblocksPort)) + } else { + // Use port 0 to let the OS assign a port atomically at bind time. + // The actual port will be discovered by parsing the process logs. + args = append(args, "--flashblocks-port=0") } - fbPortStr := strconv.Itoa(cfg.FlashblocksPort) - args = append(args, "--flashblocks", "--flashblocks-host="+cfg.FlashblocksHost, "--flashblocks-port="+fbPortStr) if cfg.FlashblocksBuilderURL != "" { args = append(args, "--flashblocks-builder-url="+cfg.FlashblocksBuilderURL) } @@ -319,14 +338,16 @@ func (cfg *RollupBoostConfig) LaunchSpec(p devtest.P) (args []string, env []stri if cfg.DebugHost == "" { cfg.DebugHost = "127.0.0.1" } - if cfg.DebugPort <= 0 { - portStr, err := getAvailableLocalPort() - p.Require().NoError(err, "allocate rollup-boost debug port") - portVal, err := strconv.Atoi(portStr) - p.Require().NoError(err, "parse rollup-boost debug port") - cfg.DebugPort = portVal + args = append(args, "--debug-host="+cfg.DebugHost) + if cfg.DebugPort > 0 { + // Use explicitly configured port + args = append(args, "--debug-server-port="+strconv.Itoa(cfg.DebugPort)) + } else { + // Use port 0 to let the OS assign a port atomically at bind time. + // The debug server logs its bound address, but we don't need to parse it + // since the debug port is only used for manual debugging. + args = append(args, "--debug-server-port=0") } - args = append(args, "--debug-host="+cfg.DebugHost, "--debug-server-port="+strconv.Itoa(cfg.DebugPort)) args = append(args, cfg.ExtraArgs...) 
diff --git a/op-devstack/sysgo/system.go b/op-devstack/sysgo/system.go index 01af0b2f4c958..3113d9dc87171 100644 --- a/op-devstack/sysgo/system.go +++ b/op-devstack/sysgo/system.go @@ -602,7 +602,7 @@ func NewDefaultSingleChainSystemWithFlashblocksIDs(l1ID, l2ID eth.ChainID) Singl L2: stack.L2NetworkID(l2ID), L2CL: stack.NewL2CLNodeID("sequencer", l2ID), L2EL: stack.NewL2ELNodeID("sequencer", l2ID), - L2Builder: stack.NewOPRBuilderNodeID("sequencer", l2ID), + L2Builder: stack.NewOPRBuilderNodeID("sequencer-builder", l2ID), L2RollupBoost: stack.NewRollupBoostNodeID("rollup-boost", l2ID), L2Batcher: stack.NewL2BatcherID("main", l2ID), L2Proposer: stack.NewL2ProposerID("main", l2ID), @@ -620,8 +620,8 @@ func DefaultSingleChainSystemWithFlashblocks(dest *SingleChainSystemWithFlashblo func singleChainSystemWithFlashblocksOpts(ids *SingleChainSystemWithFlashblocksIDs, dest *SingleChainSystemWithFlashblocksIDs) stack.CombinedOption[*Orchestrator] { opt := stack.Combine[*Orchestrator]() // Precompute deterministic P2P identity and peering between sequencer EL and op-rbuilder EL. 
- seqID := NewELNodeIdentity("127.0.0.1", 0) - builderID := NewELNodeIdentity("127.0.0.1", 0) // allocate dynamic port for builder + seqID := NewELNodeIdentity(0) + builderID := NewELNodeIdentity(0) // allocate dynamic port for builder opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { o.P().Logger().Info("Setting up") @@ -639,8 +639,12 @@ func singleChainSystemWithFlashblocksOpts(ids *SingleChainSystemWithFlashblocksI opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - opt.Add(WithL2ELNode(ids.L2EL, L2ELWithP2PConfig("127.0.0.1", seqID.Port, seqID.KeyHex(), []string{builderID.Enode}, nil))) - opt.Add(WithOPRBuilderNode(ids.L2Builder, OPRBuilderWithNodeIdentity(builderID, "127.0.0.1", []string{seqID.Enode}, []string{seqID.Enode}))) + opt.Add(WithL2ELNode(ids.L2EL, L2ELWithP2PConfig("127.0.0.1", seqID.Port, seqID.KeyHex(), nil, nil))) + opt.Add(WithOPRBuilderNode(ids.L2Builder, OPRBuilderWithNodeIdentity(builderID, "127.0.0.1", nil, nil))) + // Sequencer adds builder as regular static peer (not trusted) + opt.Add(WithL2ELP2PConnection(ids.L2EL, stack.L2ELNodeID(ids.L2Builder), false)) + // Builder adds sequencer as trusted peer + opt.Add(WithL2ELP2PConnection(stack.L2ELNodeID(ids.L2Builder), ids.L2EL, true)) opt.Add(WithRollupBoost(ids.L2RollupBoost, ids.L2EL, RollupBoostWithBuilderNode(ids.L2Builder))) opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, stack.L2ELNodeID(ids.L2RollupBoost), L2CLSequencer())) diff --git a/op-devstack/sysgo/system_singlechain_multinode.go b/op-devstack/sysgo/system_singlechain_multinode.go index 75e5285546292..893cdb5f9b663 100644 --- a/op-devstack/sysgo/system_singlechain_multinode.go +++ b/op-devstack/sysgo/system_singlechain_multinode.go @@ -45,7 +45,7 @@ func DefaultSingleChainMultiNodeSystem(dest *DefaultSingleChainMultiNodeSystemID // P2P connect L2CL nodes opt.Add(WithL2CLP2PConnection(ids.L2CL, ids.L2CLB)) - opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2ELB)) + opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2ELB, false)) 
opt.Add(stack.Finally(func(orch *Orchestrator) { *dest = ids diff --git a/op-devstack/sysgo/system_singlechain_twoverifiers.go b/op-devstack/sysgo/system_singlechain_twoverifiers.go index 14c69f0534bd0..2e960517712f7 100644 --- a/op-devstack/sysgo/system_singlechain_twoverifiers.go +++ b/op-devstack/sysgo/system_singlechain_twoverifiers.go @@ -53,11 +53,11 @@ func DefaultSingleChainTwoVerifiersFollowL2System(dest *DefaultSingleChainTwoVer opt.Add(WithL2CLNodeFollowL2(ids.L2CLC, ids.L1CL, ids.L1EL, ids.L2ELC, ids.L2CLB)) opt.Add(WithL2CLP2PConnection(ids.L2CL, ids.L2CLB)) - opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2ELB)) + opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2ELB, false)) opt.Add(WithL2CLP2PConnection(ids.L2CL, ids.L2CLC)) - opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2ELC)) + opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2ELC, false)) opt.Add(WithL2CLP2PConnection(ids.L2CLB, ids.L2CLC)) - opt.Add(WithL2ELP2PConnection(ids.L2ELB, ids.L2ELC)) + opt.Add(WithL2ELP2PConnection(ids.L2ELB, ids.L2ELC, false)) opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) diff --git a/op-devstack/sysgo/util.go b/op-devstack/sysgo/util.go index b3903c26d6900..f323543b4e27b 100644 --- a/op-devstack/sysgo/util.go +++ b/op-devstack/sysgo/util.go @@ -1,18 +1,17 @@ package sysgo import ( - "context" "errors" "fmt" "net" "net/url" "os" "strconv" + "strings" "sync" "time" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - opclient "github.com/ethereum-optimism/optimism/op-service/client" "github.com/stretchr/testify/assert" ) @@ -94,14 +93,20 @@ func waitTCPReady(p devtest.P, rawURL string, timeout time.Duration) { }, timeout, 100*time.Millisecond, waitMsg) } -// waitWSReady attempts an actual WebSocket handshake to confirm readiness using EventuallyWithT. 
-func waitWSReady(p devtest.P, rawURL string, timeout time.Duration) { - p.Helper() - waitWSMsg := fmt.Sprintf("WebSocket endpoint %s not ready within %v", rawURL, timeout) - p.Require().EventuallyWithT(func(c *assert.CollectT) { - ctx, cancel := context.WithTimeout(context.Background(), 750*time.Millisecond) - err := opclient.ProbeWS(ctx, rawURL) - cancel() - assert.NoError(c, err, "WebSocket handshake to %s should succeed", rawURL) - }, timeout, 100*time.Millisecond, waitWSMsg) +// parseAndValidateAddr ensures the address has a scheme and is a valid URL. +// Returns the validated URL string or empty string if invalid. +// This is used to parse addresses from process (e.g. op-rbuilder) log output. +func parseAndValidateAddr(addr, defaultScheme string) string { + if addr == "" { + return "" + } + // Add scheme if not present + if !strings.Contains(addr, "://") { + addr = defaultScheme + "://" + addr + } + u, err := url.Parse(addr) + if err != nil || u.Host == "" || u.Hostname() == "" { + return "" + } + return u.String() } diff --git a/op-dispute-mon/README.md b/op-dispute-mon/README.md index d80b1988e5cc3..37aa5ebb145df 100644 --- a/op-dispute-mon/README.md +++ b/op-dispute-mon/README.md @@ -26,10 +26,10 @@ shows the available config options and can be accessed by running `./bin/op-disp --l1-eth-rpc \ --rollup-rpc ,, -# For networks using op-supervisor: +# For networks using op-supernode: ./bin/op-dispute-mon \ --network \ --l1-eth-rpc \ - --supervisor-rpc ,, + --supernode-rpc ,, ``` diff --git a/op-dispute-mon/cmd/main.go b/op-dispute-mon/cmd/main.go index 2fec22e2c1a54..971657f5e31ed 100644 --- a/op-dispute-mon/cmd/main.go +++ b/op-dispute-mon/cmd/main.go @@ -59,7 +59,7 @@ func run(ctx context.Context, args []string, action ConfiguredLifecycle) error { logger.Info("RPC endpoints", "l1", cfg.L1EthRpc, "rollup", cfg.RollupRpcs, - "supervisor", cfg.SupervisorRpcs, + "superNode", cfg.SuperNodeRpcs, ) return action(ctx.Context, logger, cfg) }) diff --git 
a/op-dispute-mon/cmd/main_test.go b/op-dispute-mon/cmd/main_test.go index 76247b9937085..a2431e89f582a 100644 --- a/op-dispute-mon/cmd/main_test.go +++ b/op-dispute-mon/cmd/main_test.go @@ -60,13 +60,13 @@ func TestL1EthRpc(t *testing.T) { }) } -func TestMustSpecifyEitherRollupRpcOrSupervisorRpc(t *testing.T) { - verifyArgsInvalid(t, "flag rollup-rpc or supervisor-rpc is required", addRequiredArgsExcept("--rollup-rpc")) +func TestMustSpecifyEitherRollupRpcOrSuperNodeRpc(t *testing.T) { + verifyArgsInvalid(t, "flag rollup-rpc or supernode-rpc is required", addRequiredArgsExcept("--rollup-rpc")) } func TestRollupRpc(t *testing.T) { - t.Run("NotRequiredIfSupervisorRpcSupplied", func(t *testing.T) { - configForArgs(t, addRequiredArgsExcept("--rollup-rpc", "--supervisor-rpc", "http://localhost/supervisor")) + t.Run("NotRequiredIfSuperNodeRpcSupplied", func(t *testing.T) { + configForArgs(t, addRequiredArgsExcept("--rollup-rpc", "--supernode-rpc", "http://localhost/supernode")) }) t.Run("Valid", func(t *testing.T) { @@ -83,23 +83,23 @@ func TestRollupRpc(t *testing.T) { }) } -func TestSupervisorRpc(t *testing.T) { +func TestSuperNodeRpc(t *testing.T) { t.Run("NotRequiredIfRollupRpcSupplied", func(t *testing.T) { // rollup-rpc is in the default args. 
- configForArgs(t, addRequiredArgsExcept("--supervisor-rpc")) + configForArgs(t, addRequiredArgsExcept("--supernode-rpc")) }) t.Run("Valid", func(t *testing.T) { url := "http://example.com:9999" - cfg := configForArgs(t, addRequiredArgsExcept("--rollup-rpc", "--supervisor-rpc", url)) - require.Equal(t, []string{url}, cfg.SupervisorRpcs) + cfg := configForArgs(t, addRequiredArgsExcept("--rollup-rpc", "--supernode-rpc", url)) + require.Equal(t, []string{url}, cfg.SuperNodeRpcs) }) t.Run("MultipleValues", func(t *testing.T) { url1 := "http://example1.com:9999" url2 := "http://example2.com:8888" - cfg := configForArgs(t, addRequiredArgsExcept("--rollup-rpc", "--supervisor-rpc", url1, "--supervisor-rpc", url2)) - require.Equal(t, []string{url1, url2}, cfg.SupervisorRpcs) + cfg := configForArgs(t, addRequiredArgsExcept("--rollup-rpc", "--supernode-rpc", url1, "--supernode-rpc", url2)) + require.Equal(t, []string{url1, url2}, cfg.SuperNodeRpcs) }) } diff --git a/op-dispute-mon/config/config.go b/op-dispute-mon/config/config.go index d39ad64a04f88..724d770957fab 100644 --- a/op-dispute-mon/config/config.go +++ b/op-dispute-mon/config/config.go @@ -12,10 +12,10 @@ import ( ) var ( - ErrMissingL1EthRPC = errors.New("missing l1 eth rpc url") - ErrMissingGameFactoryAddress = errors.New("missing game factory address") - ErrMissingRollupAndSupervisorRpc = errors.New("must specify rollup rpc or supervisor rpc") - ErrMissingMaxConcurrency = errors.New("missing max concurrency") + ErrMissingL1EthRPC = errors.New("missing l1 eth rpc url") + ErrMissingGameFactoryAddress = errors.New("missing game factory address") + ErrMissingRollupAndSuperNodeRpc = errors.New("must specify rollup rpc or super node rpc") + ErrMissingMaxConcurrency = errors.New("missing max concurrency") ) const ( @@ -40,7 +40,7 @@ type Config struct { HonestActors []common.Address // List of honest actors to monitor claims for. RollupRpcs []string // The rollup node RPC URLs. 
- SupervisorRpcs []string // The supervisor RPC URLs. + SuperNodeRpcs []string // The super node RPC URLs. MonitorInterval time.Duration // Frequency to check for new games to monitor. GameWindow time.Duration // Maximum window to look for games to monitor. IgnoredGames []common.Address // Games to exclude from monitoring @@ -50,19 +50,19 @@ type Config struct { PprofConfig oppprof.CLIConfig } -func NewInteropConfig(gameFactoryAddress common.Address, l1EthRpc string, supervisorRpcs []string) Config { - return NewCombinedConfig(gameFactoryAddress, l1EthRpc, nil, supervisorRpcs) +func NewInteropConfig(gameFactoryAddress common.Address, l1EthRpc string, superNodeRpcs []string) Config { + return NewCombinedConfig(gameFactoryAddress, l1EthRpc, nil, superNodeRpcs) } func NewConfig(gameFactoryAddress common.Address, l1EthRpc string, rollupRpcs []string) Config { return NewCombinedConfig(gameFactoryAddress, l1EthRpc, rollupRpcs, nil) } -func NewCombinedConfig(gameFactoryAddress common.Address, l1EthRpc string, rollupRpcs []string, supervisorRpcs []string) Config { +func NewCombinedConfig(gameFactoryAddress common.Address, l1EthRpc string, rollupRpcs []string, superNodeRpcs []string) Config { return Config{ L1EthRpc: l1EthRpc, RollupRpcs: rollupRpcs, - SupervisorRpcs: supervisorRpcs, + SuperNodeRpcs: superNodeRpcs, GameFactoryAddress: gameFactoryAddress, MonitorInterval: DefaultMonitorInterval, @@ -78,8 +78,8 @@ func (c Config) Check() error { if c.L1EthRpc == "" { return ErrMissingL1EthRPC } - if len(c.RollupRpcs) == 0 && len(c.SupervisorRpcs) == 0 { - return ErrMissingRollupAndSupervisorRpc + if len(c.RollupRpcs) == 0 && len(c.SuperNodeRpcs) == 0 { + return ErrMissingRollupAndSuperNodeRpc } if c.GameFactoryAddress == (common.Address{}) { return ErrMissingGameFactoryAddress diff --git a/op-dispute-mon/config/config_test.go b/op-dispute-mon/config/config_test.go index 2ffd21511738c..1955d5c65ab76 100644 --- a/op-dispute-mon/config/config_test.go +++ 
b/op-dispute-mon/config/config_test.go @@ -12,7 +12,7 @@ var ( validL1EthRpc = "http://localhost:8545" validGameFactoryAddress = common.Address{0x23} validRollupRpcs = []string{"http://localhost:8555"} - validSupervisorRpcs = []string{"http://localhost:8999"} + validSuperNodeRpcs = []string{"http://localhost:8999"} ) func validConfig() Config { @@ -35,24 +35,24 @@ func TestGameFactoryAddressRequired(t *testing.T) { require.ErrorIs(t, config.Check(), ErrMissingGameFactoryAddress) } -func TestRollupRpcOrSupervisorRpcRequired(t *testing.T) { +func TestRollupRpcOrSuperNodeRpcRequired(t *testing.T) { config := validConfig() config.RollupRpcs = nil - config.SupervisorRpcs = nil - require.ErrorIs(t, config.Check(), ErrMissingRollupAndSupervisorRpc) + config.SuperNodeRpcs = nil + require.ErrorIs(t, config.Check(), ErrMissingRollupAndSuperNodeRpc) } -func TestRollupRpcNotRequiredWhenSupervisorRpcSet(t *testing.T) { +func TestRollupRpcNotRequiredWhenSuperNodeRpcSet(t *testing.T) { config := validConfig() config.RollupRpcs = nil - config.SupervisorRpcs = validSupervisorRpcs + config.SuperNodeRpcs = validSuperNodeRpcs require.NoError(t, config.Check()) } -func TestSupervisorRpcNotRequiredWhenRollupRpcSet(t *testing.T) { +func TestSuperNodeRpcNotRequiredWhenRollupRpcSet(t *testing.T) { config := validConfig() config.RollupRpcs = validRollupRpcs - config.SupervisorRpcs = nil + config.SuperNodeRpcs = nil require.NoError(t, config.Check()) } @@ -62,22 +62,22 @@ func TestMaxConcurrencyRequired(t *testing.T) { require.ErrorIs(t, config.Check(), ErrMissingMaxConcurrency) } -func TestMultipleSupervisorRpcs(t *testing.T) { +func TestMultipleSuperNodeRpcs(t *testing.T) { config := validConfig() config.RollupRpcs = nil - config.SupervisorRpcs = []string{"http://localhost:8999", "http://localhost:9000", "http://localhost:9001"} + config.SuperNodeRpcs = []string{"http://localhost:8999", "http://localhost:9000", "http://localhost:9001"} require.NoError(t, config.Check()) } func 
TestInteropConfig(t *testing.T) { gameFactoryAddr := common.Address{0x42} l1RPC := "http://localhost:8545" - supervisorRpcs := []string{"http://localhost:8999", "http://localhost:9000"} + superNodeRpcs := []string{"http://localhost:8999", "http://localhost:9000"} - config := NewInteropConfig(gameFactoryAddr, l1RPC, supervisorRpcs) + config := NewInteropConfig(gameFactoryAddr, l1RPC, superNodeRpcs) require.Equal(t, gameFactoryAddr, config.GameFactoryAddress) require.Equal(t, l1RPC, config.L1EthRpc) - require.Equal(t, supervisorRpcs, config.SupervisorRpcs) + require.Equal(t, superNodeRpcs, config.SuperNodeRpcs) require.Nil(t, config.RollupRpcs) require.NoError(t, config.Check()) } @@ -86,12 +86,12 @@ func TestCombinedConfig(t *testing.T) { gameFactoryAddr := common.Address{0x42} l1RPC := "http://localhost:8545" rollupRpcs := []string{"http://localhost:8555"} - supervisorRpcs := []string{"http://localhost:8999"} + superNodeRpcs := []string{"http://localhost:8999"} - config := NewCombinedConfig(gameFactoryAddr, l1RPC, rollupRpcs, supervisorRpcs) + config := NewCombinedConfig(gameFactoryAddr, l1RPC, rollupRpcs, superNodeRpcs) require.Equal(t, gameFactoryAddr, config.GameFactoryAddress) require.Equal(t, l1RPC, config.L1EthRpc) require.Equal(t, rollupRpcs, config.RollupRpcs) - require.Equal(t, supervisorRpcs, config.SupervisorRpcs) + require.Equal(t, superNodeRpcs, config.SuperNodeRpcs) require.NoError(t, config.Check()) } diff --git a/op-dispute-mon/flags/flags.go b/op-dispute-mon/flags/flags.go index 56198fc93830f..e27267a4989a0 100644 --- a/op-dispute-mon/flags/flags.go +++ b/op-dispute-mon/flags/flags.go @@ -38,10 +38,10 @@ var ( Usage: "HTTP provider URL for the rollup node. Multiple URLs can be specified for redundancy.", EnvVars: prefixEnvVars("ROLLUP_RPC"), } - SupervisorRpcFlag = &cli.StringSliceFlag{ - Name: "supervisor-rpc", - Usage: "HTTP provider URL for supervisor nodes. 
Multiple URLs can be specified for redundancy.", - EnvVars: prefixEnvVars("SUPERVISOR_RPC"), + SuperNodeRpcFlag = &cli.StringSliceFlag{ + Name: "supernode-rpc", + Usage: "HTTP provider URL for super nodes. Multiple URLs can be specified for redundancy.", + EnvVars: prefixEnvVars("SUPERNODE_RPC"), } GameFactoryAddressFlag = &cli.StringFlag{ Name: "game-factory-address", @@ -92,7 +92,7 @@ var requiredFlags = []cli.Flag{ // optionalFlags is a list of unchecked cli flags var optionalFlags = []cli.Flag{ RollupRpcFlag, - SupervisorRpcFlag, + SuperNodeRpcFlag, GameFactoryAddressFlag, NetworkFlag, HonestActorsFlag, @@ -119,8 +119,8 @@ func CheckRequired(ctx *cli.Context) error { return fmt.Errorf("flag %s is required", f.Names()[0]) } } - if len(ctx.StringSlice(RollupRpcFlag.Name)) == 0 && len(ctx.StringSlice(SupervisorRpcFlag.Name)) == 0 { - return fmt.Errorf("flag %s or %s is required", RollupRpcFlag.Name, SupervisorRpcFlag.Name) + if len(ctx.StringSlice(RollupRpcFlag.Name)) == 0 && len(ctx.StringSlice(SuperNodeRpcFlag.Name)) == 0 { + return fmt.Errorf("flag %s or %s is required", RollupRpcFlag.Name, SuperNodeRpcFlag.Name) } return nil } @@ -169,7 +169,7 @@ func NewConfigFromCLI(ctx *cli.Context) (*config.Config, error) { L1EthRpc: ctx.String(L1EthRpcFlag.Name), GameFactoryAddress: gameFactoryAddress, RollupRpcs: ctx.StringSlice(RollupRpcFlag.Name), - SupervisorRpcs: ctx.StringSlice(SupervisorRpcFlag.Name), + SuperNodeRpcs: ctx.StringSlice(SuperNodeRpcFlag.Name), HonestActors: actors, MonitorInterval: ctx.Duration(MonitorIntervalFlag.Name), diff --git a/op-dispute-mon/mon/extract/caller.go b/op-dispute-mon/mon/extract/caller.go index 366add6e54a50..4aa3aa96618b4 100644 --- a/op-dispute-mon/mon/extract/caller.go +++ b/op-dispute-mon/mon/extract/caller.go @@ -54,14 +54,11 @@ func (g *GameCallerCreator) CreateContract(ctx context.Context, game gameTypes.G case gameTypes.CannonGameType, gameTypes.PermissionedGameType, gameTypes.CannonKonaGameType, - 
gameTypes.AsteriscGameType, gameTypes.AlphabetGameType, gameTypes.FastGameType, - gameTypes.AsteriscKonaGameType, gameTypes.SuperCannonGameType, gameTypes.SuperPermissionedGameType, - gameTypes.SuperCannonKonaGameType, - gameTypes.SuperAsteriscKonaGameType: + gameTypes.SuperCannonKonaGameType: fdg, err := contracts.NewFaultDisputeGameContract(ctx, g.m, game.Proxy, g.caller) if err != nil { return nil, fmt.Errorf("failed to create fault dispute game contract: %w", err) diff --git a/op-dispute-mon/mon/extract/caller_test.go b/op-dispute-mon/mon/extract/caller_test.go index 119223a543129..ff5e42daadd6e 100644 --- a/op-dispute-mon/mon/extract/caller_test.go +++ b/op-dispute-mon/mon/extract/caller_test.go @@ -38,10 +38,6 @@ func TestMetadataCreator_CreateContract(t *testing.T) { name: "validCannonKonaGameType", game: types.GameMetadata{GameType: uint32(types.CannonKonaGameType), Proxy: fdgAddr}, }, - { - name: "validAsteriscGameType", - game: types.GameMetadata{GameType: uint32(types.AsteriscGameType), Proxy: fdgAddr}, - }, { name: "validAlphabetGameType", game: types.GameMetadata{GameType: uint32(types.AlphabetGameType), Proxy: fdgAddr}, @@ -50,10 +46,6 @@ func TestMetadataCreator_CreateContract(t *testing.T) { name: "validFastGameType", game: types.GameMetadata{GameType: uint32(types.FastGameType), Proxy: fdgAddr}, }, - { - name: "validAsteriscKonaGameType", - game: types.GameMetadata{GameType: uint32(types.AsteriscKonaGameType), Proxy: fdgAddr}, - }, { name: "validSuperCannonGameType", game: types.GameMetadata{GameType: uint32(types.SuperCannonGameType), Proxy: fdgAddr}, @@ -66,10 +58,6 @@ func TestMetadataCreator_CreateContract(t *testing.T) { name: "validSuperCannonKonaGameType", game: types.GameMetadata{GameType: uint32(types.SuperCannonKonaGameType), Proxy: fdgAddr}, }, - { - name: "validSuperAsteriscKonaGameType", - game: types.GameMetadata{GameType: uint32(types.SuperAsteriscKonaGameType), Proxy: fdgAddr}, - }, { name: "InvalidGameType", game: 
types.GameMetadata{GameType: 6, Proxy: fdgAddr}, @@ -102,8 +90,7 @@ func setupMetadataLoaderTest(t *testing.T, gameType uint32) (*batching.MultiCall fdgAbi := snapshots.LoadFaultDisputeGameABI() if gameType == uint32(types.SuperPermissionedGameType) || gameType == uint32(types.SuperCannonGameType) || - gameType == uint32(types.SuperCannonKonaGameType) || - gameType == uint32(types.SuperAsteriscKonaGameType) { + gameType == uint32(types.SuperCannonKonaGameType) { fdgAbi = snapshots.LoadSuperFaultDisputeGameABI() } stubRpc := batchingTest.NewAbiBasedRpc(t, fdgAddr, fdgAbi) diff --git a/op-dispute-mon/mon/extract/super_agreement_enricher.go b/op-dispute-mon/mon/extract/super_agreement_enricher.go index b4fbf4d1b2d34..3d50c7fb0650e 100644 --- a/op-dispute-mon/mon/extract/super_agreement_enricher.go +++ b/op-dispute-mon/mon/extract/super_agreement_enricher.go @@ -10,19 +10,17 @@ import ( "github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" ) var ( - ErrSupervisorRpcRequired = errors.New("supervisor rpc required") - ErrAllSupervisorNodesUnavailable = errors.New("all supervisor nodes returned errors") + ErrSuperNodeRpcRequired = errors.New("super node rpc required") + ErrAllSuperNodesUnavailable = errors.New("all super nodes returned errors") ) type SuperRootProvider interface { - SuperRootAtTimestamp(ctx context.Context, timestamp hexutil.Uint64) (eth.SuperRootResponse, error) + SuperRootAtTimestamp(ctx context.Context, timestamp uint64) (eth.SuperRootAtTimestampResponse, error) } type SuperAgreementEnricher struct { @@ -42,11 +40,10 @@ func NewSuperAgreementEnricher(logger log.Logger, metrics OutputMetrics, clients } type superRootResult struct { - superRoot common.Hash - 
isSafe bool - notFound bool - err error - crossSafeDerivedFrom uint64 + superRoot common.Hash + isSafe bool + notFound bool + err error } func (e *SuperAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Block, caller GameCaller, game *monTypes.EnrichedGameData) error { @@ -54,7 +51,7 @@ func (e *SuperAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Bloc return nil } if len(e.clients) == 0 { - return fmt.Errorf("%w but required for game type %v", ErrSupervisorRpcRequired, game.GameType) + return fmt.Errorf("%w but required for game type %v", ErrSuperNodeRpcRequired, game.GameType) } results := make([]superRootResult, len(e.clients)) @@ -63,21 +60,20 @@ func (e *SuperAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Bloc wg.Add(1) go func(i int, client SuperRootProvider) { defer wg.Done() - response, err := client.SuperRootAtTimestamp(ctx, hexutil.Uint64(game.L2SequenceNumber)) - if errors.Is(err, ethereum.NotFound) { - results[i] = superRootResult{notFound: true} - return - } + response, err := client.SuperRootAtTimestamp(ctx, game.L2SequenceNumber) if err != nil { results[i] = superRootResult{err: err} return } + if response.Data == nil { + results[i] = superRootResult{notFound: true} + return + } - superRoot := common.Hash(response.SuperRoot) + superRoot := common.Hash(response.Data.SuperRoot) results[i] = superRootResult{ - superRoot: superRoot, - crossSafeDerivedFrom: response.CrossSafeDerivedFrom.Number, - isSafe: response.CrossSafeDerivedFrom.Number <= game.L1HeadNum, + superRoot: superRoot, + isSafe: response.Data.VerifiedRequiredL1.Number <= game.L1HeadNum, } }(i, client) } @@ -100,7 +96,7 @@ func (e *SuperAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Bloc // If all results were errors, return an error if len(validResults) == 0 { - return fmt.Errorf("failed to get super root at timestamp: %w", ErrAllSupervisorNodesUnavailable) + return fmt.Errorf("failed to get super root at timestamp: %w", 
ErrAllSuperNodesUnavailable) } // If all remaining nodes returned "not found", we disagree with any claim. @@ -129,7 +125,7 @@ func (e *SuperAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Bloc } if diverged { - e.log.Warn("Supervisor nodes disagree on super root", + e.log.Warn("Super nodes disagree on super root", "l2SequenceNumber", game.L2SequenceNumber, "firstSuperRoot", firstResult.superRoot, "found", len(foundResults), diff --git a/op-dispute-mon/mon/extract/super_agreement_enricher_test.go b/op-dispute-mon/mon/extract/super_agreement_enricher_test.go index 7fa0602786214..4c1ccf9f0f432 100644 --- a/op-dispute-mon/mon/extract/super_agreement_enricher_test.go +++ b/op-dispute-mon/mon/extract/super_agreement_enricher_test.go @@ -13,9 +13,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" ) @@ -23,7 +21,7 @@ import ( func TestDetector_CheckSuperRootAgreement(t *testing.T) { t.Parallel() - t.Run("ErrorWhenNoSupervisorClient", func(t *testing.T) { + t.Run("ErrorWhenNoSuperNodeClient", func(t *testing.T) { validator, _, _ := setupSuperValidatorTest(t) validator.clients = nil // Set to nil to test the error case game := &types.EnrichedGameData{ @@ -35,7 +33,7 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { RootClaim: mockRootClaim, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) - require.ErrorIs(t, err, ErrSupervisorRpcRequired) + require.ErrorIs(t, err, ErrSuperNodeRpcRequired) }) t.Run("SkipOutputRootGameTypes", func(t *testing.T) { @@ -44,7 +42,7 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { gameType := gameType 
t.Run(fmt.Sprintf("GameType_%d", gameType), func(t *testing.T) { validator, _, metrics := setupSuperValidatorTest(t) - validator.clients = nil // Should not error even though there's no supervisor client + validator.clients = nil // Should not error even though there's no super node client game := &types.EnrichedGameData{ GameMetadata: challengerTypes.GameMetadata{ GameType: gameType, @@ -93,7 +91,7 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { RootClaim: mockRootClaim, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) - require.ErrorIs(t, err, ErrAllSupervisorNodesUnavailable) + require.ErrorIs(t, err, ErrAllSuperNodesUnavailable) require.Equal(t, common.Hash{}, game.ExpectedRootClaim) require.False(t, game.AgreeWithClaim) require.Zero(t, metrics.fetchTime) @@ -190,8 +188,7 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { t.Run("OutputNotFound", func(t *testing.T) { validator, client, metrics := setupSuperValidatorTest(t) - // The supervisor client automatically translates RPC errors back to ethereum.NotFound for us - client.outputErr = ethereum.NotFound + client.notFound = true game := &types.EnrichedGameData{ GameMetadata: challengerTypes.GameMetadata{ GameType: 999, @@ -207,8 +204,8 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { require.Zero(t, metrics.fetchTime) }) - t.Run("AllSupervisorNodesReturnError", func(t *testing.T) { - validator, clients, metrics := setupMultiSupervisorTest(t, 3) + t.Run("AllSuperNodesReturnError", func(t *testing.T) { + validator, clients, metrics := setupMultiSuperNodeTest(t, 3) for _, client := range clients { client.outputErr = errors.New("boom") } @@ -222,16 +219,16 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.Error(t, err) - require.ErrorIs(t, err, ErrAllSupervisorNodesUnavailable) + require.ErrorIs(t, err, ErrAllSuperNodesUnavailable) require.Equal(t, 
common.Hash{}, game.ExpectedRootClaim) require.False(t, game.AgreeWithClaim) require.Zero(t, metrics.fetchTime) }) - t.Run("AllSupervisorNodesReturnNotFound", func(t *testing.T) { - validator, clients, metrics := setupMultiSupervisorTest(t, 3) + t.Run("AllSuperNodesReturnNotFound", func(t *testing.T) { + validator, clients, metrics := setupMultiSuperNodeTest(t, 3) for _, client := range clients { - client.outputErr = ethereum.NotFound + client.notFound = true } game := &types.EnrichedGameData{ GameMetadata: challengerTypes.GameMetadata{ @@ -248,9 +245,9 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { require.Zero(t, metrics.fetchTime) }) - t.Run("SomeSupervisorNodesOutOfSync", func(t *testing.T) { - validator, clients, metrics := setupMultiSupervisorTest(t, 3) - clients[0].outputErr = ethereum.NotFound + t.Run("SomeSuperNodesOutOfSync", func(t *testing.T) { + validator, clients, metrics := setupMultiSuperNodeTest(t, 3) + clients[0].notFound = true clients[1].outputErr = nil clients[2].outputErr = nil game := &types.EnrichedGameData{ @@ -268,8 +265,8 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { require.NotZero(t, metrics.fetchTime) }) - t.Run("SupervisorNodesDiverged", func(t *testing.T) { - validator, clients, metrics := setupMultiSupervisorTest(t, 3) + t.Run("SuperNodesDiverged", func(t *testing.T) { + validator, clients, metrics := setupMultiSuperNodeTest(t, 3) divergedRoot := common.HexToHash("0x5678") clients[0].superRoot = mockRootClaim clients[1].superRoot = divergedRoot @@ -289,8 +286,8 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { require.NotZero(t, metrics.fetchTime) }) - t.Run("AllSupervisorNodesAgree", func(t *testing.T) { - validator, clients, metrics := setupMultiSupervisorTest(t, 3) + t.Run("AllSuperNodesAgree", func(t *testing.T) { + validator, clients, metrics := setupMultiSuperNodeTest(t, 3) clients[0].derivedFromL1BlockNum = 200 clients[1].derivedFromL1BlockNum = 199 clients[2].derivedFromL1BlockNum = 
201 @@ -310,9 +307,9 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { }) t.Run("MixedResponses_FoundNodesMatchClaimAndSafe", func(t *testing.T) { - validator, clients, metrics := setupMultiSupervisorTest(t, 4) - clients[0].outputErr = ethereum.NotFound - clients[1].outputErr = ethereum.NotFound + validator, clients, metrics := setupMultiSuperNodeTest(t, 4) + clients[0].notFound = true + clients[1].notFound = true clients[2].superRoot = mockRootClaim clients[2].derivedFromL1BlockNum = 100 // Safe because L1HeadNum is 200 clients[3].superRoot = mockRootClaim @@ -333,9 +330,9 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { }) t.Run("MixedResponses_FoundNodesDontMatchClaim", func(t *testing.T) { - validator, clients, metrics := setupMultiSupervisorTest(t, 3) + validator, clients, metrics := setupMultiSuperNodeTest(t, 3) differentRoot := common.HexToHash("0x9999") - clients[0].outputErr = ethereum.NotFound + clients[0].notFound = true clients[1].superRoot = differentRoot clients[1].derivedFromL1BlockNum = 100 clients[2].superRoot = differentRoot @@ -356,7 +353,7 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { }) t.Run("AllNodesAgree_SuperRootMatchesClaim_NoneReportSafe", func(t *testing.T) { - validator, clients, metrics := setupMultiSupervisorTest(t, 3) + validator, clients, metrics := setupMultiSuperNodeTest(t, 3) for _, client := range clients { client.superRoot = mockRootClaim @@ -379,7 +376,7 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { }) t.Run("AllNodesAgree_SuperRootDifferentFromClaim", func(t *testing.T) { - validator, clients, metrics := setupMultiSupervisorTest(t, 3) + validator, clients, metrics := setupMultiSuperNodeTest(t, 3) differentRoot := common.HexToHash("0xdifferent") for _, client := range clients { @@ -403,46 +400,51 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { }) } -func setupSuperValidatorTest(t *testing.T) (*SuperAgreementEnricher, *stubSupervisorClient, *stubOutputMetrics) { 
+func setupSuperValidatorTest(t *testing.T) (*SuperAgreementEnricher, *stubSuperNodeClient, *stubOutputMetrics) { logger := testlog.Logger(t, log.LvlInfo) - client := &stubSupervisorClient{derivedFromL1BlockNum: 0, superRoot: mockRootClaim} + client := &stubSuperNodeClient{derivedFromL1BlockNum: 0, superRoot: mockRootClaim} metrics := &stubOutputMetrics{} validator := NewSuperAgreementEnricher(logger, metrics, []SuperRootProvider{client}, clock.NewDeterministicClock(time.Unix(9824924, 499))) return validator, client, metrics } -func setupMultiSupervisorTest(t *testing.T, numNodes int) (*SuperAgreementEnricher, []*stubSupervisorClient, *stubOutputMetrics) { +func setupMultiSuperNodeTest(t *testing.T, numNodes int) (*SuperAgreementEnricher, []*stubSuperNodeClient, *stubOutputMetrics) { logger := testlog.Logger(t, log.LvlInfo) - clients := make([]*stubSupervisorClient, numNodes) - supervisorClients := make([]SuperRootProvider, numNodes) + clients := make([]*stubSuperNodeClient, numNodes) + superNodeClients := make([]SuperRootProvider, numNodes) for i := range clients { - clients[i] = &stubSupervisorClient{ + clients[i] = &stubSuperNodeClient{ derivedFromL1BlockNum: 0, superRoot: mockRootClaim, } - supervisorClients[i] = clients[i] + superNodeClients[i] = clients[i] } metrics := &stubOutputMetrics{} - validator := NewSuperAgreementEnricher(logger, metrics, supervisorClients, clock.NewDeterministicClock(time.Unix(9824924, 499))) + validator := NewSuperAgreementEnricher(logger, metrics, superNodeClients, clock.NewDeterministicClock(time.Unix(9824924, 499))) return validator, clients, metrics } -type stubSupervisorClient struct { +type stubSuperNodeClient struct { requestedTimestamp uint64 outputErr error + notFound bool derivedFromL1BlockNum uint64 superRoot common.Hash } -func (s *stubSupervisorClient) SuperRootAtTimestamp(_ context.Context, timestamp hexutil.Uint64) (eth.SuperRootResponse, error) { +func (s *stubSuperNodeClient) SuperRootAtTimestamp(_ context.Context, 
timestamp uint64) (eth.SuperRootAtTimestampResponse, error) { s.requestedTimestamp = uint64(timestamp) if s.outputErr != nil { - return eth.SuperRootResponse{}, s.outputErr + return eth.SuperRootAtTimestampResponse{}, s.outputErr } - return eth.SuperRootResponse{ - CrossSafeDerivedFrom: eth.BlockID{Number: s.derivedFromL1BlockNum}, - Timestamp: uint64(timestamp), - SuperRoot: eth.Bytes32(s.superRoot), - Version: eth.SuperRootVersionV1, + if s.notFound { + return eth.SuperRootAtTimestampResponse{}, nil + } + return eth.SuperRootAtTimestampResponse{ + Data: ð.SuperRootResponseData{ + VerifiedRequiredL1: eth.BlockID{Number: s.derivedFromL1BlockNum}, + Super: eth.NewSuperV1(timestamp), + SuperRoot: eth.Bytes32(s.superRoot), + }, }, nil } diff --git a/op-dispute-mon/mon/service.go b/op-dispute-mon/mon/service.go index 73c5ab2275b99..43899427978e7 100644 --- a/op-dispute-mon/mon/service.go +++ b/op-dispute-mon/mon/service.go @@ -38,9 +38,9 @@ type Service struct { cl clock.Clock - game *extract.GameCallerCreator - rollupClients []*sources.RollupClient - supervisorClients []*sources.SupervisorClient + game *extract.GameCallerCreator + rollupClients []*sources.RollupClient + superNodeClients []*sources.SuperNodeClient l1RPC rpcclient.RPC l1Client *sources.L1Client @@ -84,8 +84,8 @@ func (s *Service) initFromConfig(ctx context.Context, cfg *config.Config) error if err := s.initOutputRollupClient(ctx, cfg); err != nil { return fmt.Errorf("failed to init rollup client: %w", err) } - if err := s.initSupervisorClients(ctx, cfg); err != nil { - return fmt.Errorf("failed to init supervisor clients: %w", err) + if err := s.initSuperNodeClients(ctx, cfg); err != nil { + return fmt.Errorf("failed to init super node clients: %w", err) } s.initGameCallerCreator() // Must be called before initForecast @@ -111,8 +111,8 @@ func (s *Service) outputRollupClients() []extract.OutputRollupClient { } func (s *Service) asSuperRootProviders() []extract.SuperRootProvider { - clients := 
make([]extract.SuperRootProvider, len(s.supervisorClients)) - for i, client := range s.supervisorClients { + clients := make([]extract.SuperRootProvider, len(s.superNodeClients)) + for i, client := range s.superNodeClients { clients[i] = client } return clients @@ -132,16 +132,16 @@ func (s *Service) initOutputRollupClient(ctx context.Context, cfg *config.Config return nil } -func (s *Service) initSupervisorClients(ctx context.Context, cfg *config.Config) error { - if len(cfg.SupervisorRpcs) == 0 { +func (s *Service) initSuperNodeClients(ctx context.Context, cfg *config.Config) error { + if len(cfg.SuperNodeRpcs) == 0 { return nil } - for _, rpc := range cfg.SupervisorRpcs { - client, err := dial.DialSupervisorClientWithTimeout(ctx, s.logger, rpc, rpcclient.WithLazyDial()) + for _, rpc := range cfg.SuperNodeRpcs { + client, err := dial.DialSuperNodeClientWithTimeout(ctx, s.logger, rpc, rpcclient.WithLazyDial()) if err != nil { - return fmt.Errorf("failed to dial supervisor client %s: %w", rpc, err) + return fmt.Errorf("failed to dial super node client %s: %w", rpc, err) } - s.supervisorClients = append(s.supervisorClients, client) + s.superNodeClients = append(s.superNodeClients, client) } return nil } diff --git a/op-e2e/actions/helpers/l2_proposer.go b/op-e2e/actions/helpers/l2_proposer.go index 82404117b6b74..527a803e293c4 100644 --- a/op-e2e/actions/helpers/l2_proposer.go +++ b/op-e2e/actions/helpers/l2_proposer.go @@ -92,7 +92,7 @@ func (f fakeTxMgr) API() rpc.API { panic("unimplemented") } -func (f fakeTxMgr) SuggestGasPriceCaps(context.Context) (*big.Int, *big.Int, *big.Int, error) { +func (f fakeTxMgr) SuggestGasPriceCaps(context.Context) (*big.Int, *big.Int, *big.Int, *big.Int, error) { panic("unimplemented") } diff --git a/op-e2e/actions/interop/proofs_test.go b/op-e2e/actions/interop/proofs_test.go index a680f0dd8f398..3913903a9657f 100644 --- a/op-e2e/actions/interop/proofs_test.go +++ b/op-e2e/actions/interop/proofs_test.go @@ -1468,7 +1468,7 @@ func 
runChallengerTest(gt *testing.T, test *transitionTest, actors *dsl.InteropA gameDepth := challengerTypes.Depth(30) rollupCfgs, err := super.NewRollupConfigsFromParsed(actors.ChainA.RollupCfg, actors.ChainB.RollupCfg) require.NoError(t, err) - provider := super.NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, actors.Supervisor, l1Head, gameDepth, startTimestamp, endTimestamp) + provider := super.NewSupervisorSuperTraceProvider(logger, rollupCfgs, prestateProvider, actors.Supervisor, l1Head, gameDepth, startTimestamp, endTimestamp) var agreedPrestate []byte if test.disputedTraceIndex > 0 { agreedPrestate, err = provider.GetPreimageBytes(t.Ctx(), challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex-1))) diff --git a/op-e2e/e2eutils/disputegame/helper.go b/op-e2e/e2eutils/disputegame/helper.go index e5fb8c6ca3c1a..d5021d7314de6 100644 --- a/op-e2e/e2eutils/disputegame/helper.go +++ b/op-e2e/e2eutils/disputegame/helper.go @@ -45,16 +45,17 @@ var ( ) const ( - cannonGameType uint32 = 0 - permissionedGameType uint32 = 1 - superCannonGameType uint32 = 4 - superPermissionedGameType uint32 = 5 - alphabetGameType uint32 = 255 + cannonGameType uint32 = 0 + permissionedGameType uint32 = 1 + superCannonGameType uint32 = 4 + alphabetGameType uint32 = 255 ) type GameCfg struct { - allowFuture bool - allowUnsafe bool + allowFuture bool + allowUnsafe bool + superOutputRoots []eth.Bytes32 + super eth.Super } type GameOpt interface { Apply(cfg *GameCfg) @@ -77,6 +78,20 @@ func WithFutureProposal() GameOpt { }) } +// WithInvalidSuperRoot configures the game to use invalid super output roots. +func WithInvalidSuperRoot() GameOpt { + return gameOptFn(func(c *GameCfg) { + c.superOutputRoots = []eth.Bytes32{{0x01}, {0x02}} + }) +} + +// WithSuper allows specifying a custom super structure. 
+func WithSuper(super eth.Super) GameOpt { + return gameOptFn(func(c *GameCfg) { + c.super = super + }) +} + type DisputeSystem interface { L1BeaconEndpoint() endpoint.RestHTTP SupervisorClient() *sources.SupervisorClient @@ -225,37 +240,34 @@ func (h *FactoryHelper) StartSuperCannonGameWithCorrectRoot(ctx context.Context, require.NoError(h.T, err) l2Timestamp := b.Time() h.WaitForSuperTimestamp(l2Timestamp, cfg) - output, err := h.System.SupervisorClient().SuperRootAtTimestamp(ctx, hexutil.Uint64(l2Timestamp)) - h.Require.NoErrorf(err, "Failed to get output at timestamp %v", l2Timestamp) - return h.startSuperCannonGameOfType(ctx, l2Timestamp, common.Hash(output.SuperRoot), superCannonGameType, opts...) + return h.startSuperCannonGameOfType(ctx, l2Timestamp, superCannonGameType, opts...) } func (h *FactoryHelper) StartSuperCannonGameWithCorrectRootAtTimestamp(ctx context.Context, l2Timestamp uint64, opts ...GameOpt) *SuperCannonGameHelper { cfg := NewGameCfg(opts...) h.WaitForSuperTimestamp(l2Timestamp, cfg) - output, err := h.System.SupervisorClient().SuperRootAtTimestamp(ctx, hexutil.Uint64(l2Timestamp)) - h.Require.NoErrorf(err, "Failed to get output at timestamp %v", l2Timestamp) - return h.startSuperCannonGameOfType(ctx, l2Timestamp, common.Hash(output.SuperRoot), superCannonGameType, opts...) + return h.startSuperCannonGameOfType(ctx, l2Timestamp, superCannonGameType, opts...) } -func (h *FactoryHelper) StartSuperCannonGame(ctx context.Context, rootClaim common.Hash, opts ...GameOpt) *SuperCannonGameHelper { +func (h *FactoryHelper) StartSuperCannonGame(ctx context.Context, opts ...GameOpt) *SuperCannonGameHelper { // Can't create a game at L1 genesis! require.NoError(h.T, wait.ForBlock(ctx, h.Client, 1)) b, err := h.Client.BlockByNumber(ctx, nil) require.NoError(h.T, err) - return h.startSuperCannonGameOfType(ctx, b.Time(), rootClaim, superCannonGameType, opts...) + return h.startSuperCannonGameOfType(ctx, b.Time(), superCannonGameType, opts...) 
} -func (h *FactoryHelper) StartSuperCannonGameAtTimestamp(ctx context.Context, timestamp uint64, rootClaim common.Hash, opts ...GameOpt) *SuperCannonGameHelper { - return h.startSuperCannonGameOfType(ctx, timestamp, rootClaim, superCannonGameType, opts...) +func (h *FactoryHelper) StartSuperCannonGameAtTimestamp(ctx context.Context, timestamp uint64, opts ...GameOpt) *SuperCannonGameHelper { + return h.startSuperCannonGameOfType(ctx, timestamp, superCannonGameType, opts...) } -func (h *FactoryHelper) startSuperCannonGameOfType(ctx context.Context, timestamp uint64, rootClaim common.Hash, gameType uint32, opts ...GameOpt) *SuperCannonGameHelper { +func (h *FactoryHelper) startSuperCannonGameOfType(ctx context.Context, timestamp uint64, gameType uint32, opts ...GameOpt) *SuperCannonGameHelper { cfg := NewGameCfg(opts...) logger := testlog.Logger(h.T, log.LevelInfo).New("role", "CannonGameHelper") rootProvider := h.System.SupervisorClient() - extraData := h.CreateSuperGameExtraData(ctx, rootProvider, timestamp, cfg) + extraData := h.createSuperGameExtraData(ctx, rootProvider, timestamp, cfg) + rootClaim := crypto.Keccak256Hash(extraData) ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) defer cancel() @@ -281,7 +293,7 @@ func (h *FactoryHelper) startSuperCannonGameOfType(ctx context.Context, timestam prestateProvider := super.NewSuperRootPrestateProvider(rootProvider, prestateTimestamp) rollupCfgs, err := super.NewRollupConfigsFromParsed(h.System.RollupCfgs()...) 
require.NoError(h.T, err, "failed to create rollup configs") - provider := super.NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, rootProvider, l1Head, splitDepth, prestateTimestamp, poststateTimestamp) + provider := super.NewSupervisorSuperTraceProvider(logger, rollupCfgs, prestateProvider, rootProvider, l1Head, splitDepth, prestateTimestamp, poststateTimestamp) return NewSuperCannonGameHelper(h.T, h.Client, h.Opts, h.PrivKey, game, h.FactoryAddr, createdEvent.DisputeProxy, provider, h.System) } @@ -348,7 +360,7 @@ func (h *FactoryHelper) CreateBisectionGameExtraData(l2Node string, l2BlockNumbe return extraData } -func (h *FactoryHelper) CreateSuperGameExtraData(ctx context.Context, supervisor *sources.SupervisorClient, timestamp uint64, cfg *GameCfg) []byte { +func (h *FactoryHelper) createSuperGameExtraData(ctx context.Context, supervisor *sources.SupervisorClient, timestamp uint64, cfg *GameCfg) []byte { if !cfg.allowFuture { timedCtx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() @@ -361,10 +373,26 @@ func (h *FactoryHelper) CreateSuperGameExtraData(ctx context.Context, supervisor }) require.NoError(h.T, err, "Safe head did not reach proposal timestamp") } - h.T.Logf("Creating game with l2 timestamp: %v", timestamp) - extraData := make([]byte, 32) - binary.BigEndian.PutUint64(extraData[24:], timestamp) - return extraData + + super := cfg.super + if super == nil { + h.T.Logf("Creating game with l2 timestamp: %v", timestamp) + superResponse, err := h.System.SupervisorClient().SuperRootAtTimestamp(ctx, hexutil.Uint64(timestamp)) + h.Require.NoErrorf(err, "Failed to get super root at timestamp %v", timestamp) + super, err = superResponse.ToSuper() + h.Require.NoErrorf(err, "Failed to parse super at timestamp %v", timestamp) + } + + superV1, ok := super.(*eth.SuperV1) + h.Require.Truef(ok, "Unsupported super type %T", super) + superV1.Timestamp = timestamp // override in case it's different from the game timestamp + if 
len(cfg.superOutputRoots) != 0 { + h.Require.Len(cfg.superOutputRoots, len(superV1.Chains), "Super output roots length mismatch") + for i := range superV1.Chains { + superV1.Chains[i].Output = cfg.superOutputRoots[i] + } + } + return superV1.Marshal() } func (h *FactoryHelper) WaitForBlock(l2Node string, l2BlockNumber uint64, cfg *GameCfg) { @@ -442,3 +470,16 @@ func (h *FactoryHelper) StartChallenger(ctx context.Context, name string, option }) return c } + +// CreateInvalidSuper creates a SuperV1 with invalid outputs +func CreateInvalidSuper(timestamp uint64) *eth.SuperV1 { + return ð.SuperV1{ + Timestamp: timestamp, + Chains: []eth.ChainIDAndOutput{ + { + ChainID: eth.ChainIDFromUInt64(1), + Output: eth.Bytes32{0x01}, + }, + }, + } +} diff --git a/op-e2e/e2eutils/disputegame/super_cannon_helper.go b/op-e2e/e2eutils/disputegame/super_cannon_helper.go index e441a938a1852..52ca34ecd7553 100644 --- a/op-e2e/e2eutils/disputegame/super_cannon_helper.go +++ b/op-e2e/e2eutils/disputegame/super_cannon_helper.go @@ -32,7 +32,7 @@ type SuperCannonGameHelper struct { CannonHelper } -func NewSuperCannonGameHelper(t *testing.T, client *ethclient.Client, opts *bind.TransactOpts, key *ecdsa.PrivateKey, game contracts.FaultDisputeGameContract, factoryAddr common.Address, gameAddr common.Address, provider *super.SuperTraceProvider, system DisputeSystem) *SuperCannonGameHelper { +func NewSuperCannonGameHelper(t *testing.T, client *ethclient.Client, opts *bind.TransactOpts, key *ecdsa.PrivateKey, game contracts.FaultDisputeGameContract, factoryAddr common.Address, gameAddr common.Address, provider super.SuperTraceProvider, system DisputeSystem) *SuperCannonGameHelper { superGameHelper := NewSuperGameHelper(t, require.New(t), client, opts, key, game, factoryAddr, gameAddr, provider, system) defaultChallengerOptions := func() []challenger.Option { return []challenger.Option{ @@ -75,6 +75,7 @@ func (g *SuperCannonGameHelper) CreateHonestActor(ctx context.Context, options . 
vm.NewOpProgramServerExecutor(logger), prestateProvider, supervisorClient, + nil, cfg.CannonAbsolutePreState, dir, l1Head, @@ -183,7 +184,7 @@ func (g *SuperCannonGameHelper) createSuperCannonTraceProvider(ctx context.Conte return translatingProvider.Original().(*cannon.CannonTraceProviderForTest) } -func (g *SuperCannonGameHelper) createSuperTraceProvider(ctx context.Context) *super.SuperTraceProvider { +func (g *SuperCannonGameHelper) createSuperTraceProvider(ctx context.Context) super.SuperTraceProvider { logger := testlog.Logger(g.t, log.LevelInfo).New("role", "superTraceProvider", "game", g.splitGame.Addr) rootProvider := g.System.SupervisorClient() splitDepth := g.splitGame.SplitDepth(ctx) @@ -193,7 +194,7 @@ func (g *SuperCannonGameHelper) createSuperTraceProvider(ctx context.Context) *s prestateProvider := super.NewSuperRootPrestateProvider(rootProvider, prestateTimestamp) rollupCfgs, err := super.NewRollupConfigsFromParsed(g.System.RollupCfgs()...) require.NoError(g.T, err, "failed to create rollup configs") - return super.NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, rootProvider, l1Head, splitDepth, prestateTimestamp, poststateTimestamp) + return super.NewSupervisorSuperTraceProvider(logger, rollupCfgs, prestateProvider, rootProvider, l1Head, splitDepth, prestateTimestamp, poststateTimestamp) } // InitFirstDerivationGame builds a top-level game whose deepest node (at splitDepth) asserts the first diff --git a/op-e2e/e2eutils/disputegame/super_game_helper.go b/op-e2e/e2eutils/disputegame/super_game_helper.go index 09ad68f20898b..bce9559af1111 100644 --- a/op-e2e/e2eutils/disputegame/super_game_helper.go +++ b/op-e2e/e2eutils/disputegame/super_game_helper.go @@ -19,7 +19,7 @@ type SuperGameHelper struct { } func NewSuperGameHelper(t *testing.T, require *require.Assertions, client *ethclient.Client, opts *bind.TransactOpts, privKey *ecdsa.PrivateKey, - game contracts.FaultDisputeGameContract, factoryAddr common.Address, addr common.Address, 
correctOutputProvider *super.SuperTraceProvider, system DisputeSystem) *SuperGameHelper { + game contracts.FaultDisputeGameContract, factoryAddr common.Address, addr common.Address, correctOutputProvider super.SuperTraceProvider, system DisputeSystem) *SuperGameHelper { return &SuperGameHelper{ SplitGameHelper: SplitGameHelper{ T: t, diff --git a/op-e2e/faultproofs/super_test.go b/op-e2e/faultproofs/super_test.go index 0944f0d97dced..b739eb1a90712 100644 --- a/op-e2e/faultproofs/super_test.go +++ b/op-e2e/faultproofs/super_test.go @@ -31,7 +31,7 @@ func TestCreateSuperCannonGame(t *testing.T) { ctx := context.Background() sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType)) sys.L2IDs() - game := disputeGameFactory.StartSuperCannonGame(ctx, common.Hash{0x01}) + game := disputeGameFactory.StartSuperCannonGame(ctx, disputegame.WithInvalidSuperRoot()) game.LogGameData(ctx) }) } @@ -40,7 +40,7 @@ func TestSuperCannonGame(t *testing.T) { RunTestAcrossVmTypes(t, func(t *testing.T, allocType config.AllocType) { ctx := context.Background() sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType)) - game := disputeGameFactory.StartSuperCannonGame(ctx, common.Hash{0x01}) + game := disputeGameFactory.StartSuperCannonGame(ctx, disputegame.WithInvalidSuperRoot()) testCannonGame(t, ctx, createSuperGameArena(t, sys, game), &game.SplitGameHelper) }) } @@ -49,7 +49,7 @@ func TestSuperCannonGame_WithBlobs(t *testing.T) { RunTestAcrossVmTypes(t, func(t *testing.T, allocType config.AllocType) { ctx := context.Background() sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType), WithBlobBatches()) - game := disputeGameFactory.StartSuperCannonGame(ctx, common.Hash{0x01}) + game := disputeGameFactory.StartSuperCannonGame(ctx, disputegame.WithInvalidSuperRoot()) testCannonGame(t, ctx, createSuperGameArena(t, sys, game), &game.SplitGameHelper) }) } @@ -58,7 +58,7 @@ func 
TestSuperCannonGame_ChallengeAllZeroClaim(t *testing.T) { RunTestAcrossVmTypes(t, func(t *testing.T, allocType config.AllocType) { ctx := context.Background() sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType)) - game := disputeGameFactory.StartSuperCannonGame(ctx, common.Hash{0x01}) + game := disputeGameFactory.StartSuperCannonGame(ctx, disputegame.WithInvalidSuperRoot()) testCannonChallengeAllZeroClaim(t, ctx, createSuperGameArena(t, sys, game), &game.SplitGameHelper) }, WithNextVMOnly[any]()) } @@ -88,7 +88,7 @@ func TestSuperCannonPublishCannonRootClaim(t *testing.T) { require.NoError(t, err) disputeL2SequenceNumber := b.Time() + test.disputeL2SequenceNumberOffset - game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, disputeL2SequenceNumber, common.Hash{0x01}) + game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, disputeL2SequenceNumber, disputegame.WithInvalidSuperRoot()) game.DisputeLastBlock(ctx) game.LogGameData(ctx) game.StartChallenger(ctx, "Challenger", challenger.WithPrivKey(aliceKey(t)), challenger.WithDepset(t, sys.DependencySet())) @@ -140,7 +140,7 @@ func TestSuperCannonDisputeGame(t *testing.T) { RunTestsAcrossVmTypes(t, tests, func(t *testing.T, allocType config.AllocType, test TestCase) { ctx := context.Background() sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType)) - game := disputeGameFactory.StartSuperCannonGame(ctx, common.Hash{0x01, 0xaa}) + game := disputeGameFactory.StartSuperCannonGame(ctx, disputegame.WithInvalidSuperRoot()) game.LogGameData(ctx) disputeClaim := game.DisputeLastBlock(ctx) @@ -180,7 +180,7 @@ func TestSuperCannonDefendStep(t *testing.T) { RunTestAcrossVmTypes(t, func(t *testing.T, allocType config.AllocType) { ctx := context.Background() sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType)) - game := disputeGameFactory.StartSuperCannonGame(ctx, common.Hash{0x01}) + game := 
disputeGameFactory.StartSuperCannonGame(ctx, disputegame.WithInvalidSuperRoot()) testCannonDefendStep(t, ctx, createSuperGameArena(t, sys, game), &game.SplitGameHelper) }, WithNextVMOnly[any]()) } @@ -300,7 +300,7 @@ func TestSuperCannonPoisonedPostState(t *testing.T) { RunTestAcrossVmTypes(t, func(t *testing.T, allocType config.AllocType) { ctx := context.Background() sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType)) - game := disputeGameFactory.StartSuperCannonGame(ctx, common.Hash{0x01}) + game := disputeGameFactory.StartSuperCannonGame(ctx, disputegame.WithInvalidSuperRoot()) testCannonPoisonedPostState(t, ctx, createSuperGameArena(t, sys, game), &game.SplitGameHelper) }, WithNextVMOnly[any]()) } @@ -318,7 +318,7 @@ func TestSuperCannonRootBeyondProposedBlock_InvalidRoot(t *testing.T) { RunTestAcrossVmTypes(t, func(t *testing.T, allocType config.AllocType) { ctx := context.Background() sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType)) - game := disputeGameFactory.StartSuperCannonGame(ctx, common.Hash{0x01}) + game := disputeGameFactory.StartSuperCannonGame(ctx, disputegame.WithInvalidSuperRoot()) testDisputeRootBeyondProposedBlockInvalidOutputRoot(t, ctx, createSuperGameArena(t, sys, game), &game.SplitGameHelper) }, WithNextVMOnly[any]()) } @@ -327,7 +327,7 @@ func TestSuperCannonRootChangeClaimedRoot(t *testing.T) { RunTestAcrossVmTypes(t, func(t *testing.T, allocType config.AllocType) { ctx := context.Background() sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType)) - game := disputeGameFactory.StartSuperCannonGame(ctx, common.Hash{0x01}) + game := disputeGameFactory.StartSuperCannonGame(ctx, disputegame.WithInvalidSuperRoot()) testDisputeRootChangeClaimedRoot(t, ctx, createSuperGameArena(t, sys, game), &game.SplitGameHelper) }, WithNextVMOnly[any]()) } @@ -387,8 +387,7 @@ func TestSuperInvalidateUnsafeProposal(t *testing.T) { // Root claim is 
_dishonest_ because the required data is not available on L1 unsafeSuper := createSuperRoot(t, ctx, sys, unsafeTimestamp) - unsafeRoot := eth.SuperRoot(unsafeSuper) - game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, unsafeTimestamp, common.Hash(unsafeRoot), disputegame.WithFutureProposal()) + game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, unsafeTimestamp, disputegame.WithSuper(unsafeSuper), disputegame.WithFutureProposal()) correctTrace := game.CreateHonestActor(ctx, disputegame.WithPrivKey(malloryKey(t)), func(c *disputegame.HonestActorConfig) { c.ChallengerOpts = append(c.ChallengerOpts, challenger.WithDepset(t, sys.DependencySet())) @@ -446,8 +445,7 @@ func TestSuperInalidateUnsafeProposal_SecondChainIsUnsafe(t *testing.T) { // Root claim is _dishonest_ because the required data to construct the chain B output root is not available on L1 unsafeSuper := createSuperRoot(t, ctx, sys, gameTimestamp) - unsafeRoot := eth.SuperRoot(unsafeSuper) - game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, gameTimestamp, common.Hash(unsafeRoot), disputegame.WithFutureProposal()) + game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, gameTimestamp, disputegame.WithSuper(unsafeSuper), disputegame.WithFutureProposal()) prestateTimestamp, _, err := game.Game.GetGameRange(ctx) require.NoError(t, err, "Failed to get game range") @@ -501,7 +499,9 @@ func TestSuperInvalidateProposalForFutureBlock(t *testing.T) { sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType)) // Root claim is _dishonest_ because the required data is not available on L1 farFutureTimestamp := time.Now().Add(time.Second * 10_000_000).Unix() - game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, uint64(farFutureTimestamp), common.Hash{0x01}, disputegame.WithFutureProposal()) + invalidSuper := disputegame.CreateInvalidSuper(uint64(farFutureTimestamp)) + // manually create an invalid super since we can't rely on the 
super RPC to fetch one at the far timestamp + game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, uint64(farFutureTimestamp), disputegame.WithSuper(invalidSuper), disputegame.WithFutureProposal()) correctTrace := game.CreateHonestActor(ctx, disputegame.WithPrivKey(malloryKey(t)), func(c *disputegame.HonestActorConfig) { c.ChallengerOpts = append(c.ChallengerOpts, challenger.WithDepset(t, sys.DependencySet())) }) @@ -567,6 +567,8 @@ func TestSuperInvalidateCorrectProposalFutureBlock(t *testing.T) { require.NoError(t, err, "Failed to get sync status") superRoot, err := client.SuperRootAtTimestamp(ctx, hexutil.Uint64(status.SafeTimestamp)) require.NoError(t, err, "Failed to get super root at safe timestamp") + super, err := superRoot.ToSuper() + require.NoError(t, err, "Failed to parse super root") // Stop the batcher so the safe head doesn't advance for _, id := range sys.L2IDs() { @@ -575,7 +577,7 @@ func TestSuperInvalidateCorrectProposalFutureBlock(t *testing.T) { // Create a dispute game with a proposal that is valid at `superRoot.Timestamp`, but that claims to correspond to timestamp // `superRoot.Timestamp + 100000`. This is dishonest, because the superchain hasn't reached this timestamp yet. 
- game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, superRoot.Timestamp+100_000, common.Hash(superRoot.SuperRoot), disputegame.WithFutureProposal()) + game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, superRoot.Timestamp+100_000, disputegame.WithSuper(super), disputegame.WithFutureProposal()) game.StartChallenger(ctx, "Challenger", challenger.WithPrivKey(aliceKey(t)), challenger.WithDepset(t, sys.DependencySet())) @@ -673,7 +675,7 @@ func TestSuperCannonHonestSafeTraceExtensionInvalidRoot(t *testing.T) { disputeGameFactory.WaitForSuperTimestamp(safeTimestamp, new(disputegame.GameCfg)) - game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, safeTimestamp-1, common.Hash{0xCA, 0xFE}) + game := disputeGameFactory.StartSuperCannonGameAtTimestamp(ctx, safeTimestamp-1, disputegame.WithInvalidSuperRoot()) require.NotNil(t, game) // Create a correct trace actor with an honest trace extending to safeTimestamp diff --git a/op-e2e/system/da/eip4844_test.go b/op-e2e/system/da/eip4844_test.go index 2f7c2ffe3e02b..5406127006baf 100644 --- a/op-e2e/system/da/eip4844_test.go +++ b/op-e2e/system/da/eip4844_test.go @@ -293,11 +293,11 @@ func TestBatcherAutoDA(t *testing.T) { // Helpers mustGetFees := func() (*big.Int, *big.Int, *big.Int, float64) { - tip, baseFee, blobFee, err := txmgr.DefaultGasPriceEstimatorFn(ctx, l1Client) + tip, baseFee, _, blobBaseFee, err := txmgr.DefaultGasPriceEstimatorFn(ctx, l1Client) require.NoError(t, err) - feeRatio := float64(blobFee.Int64()) / float64(baseFee.Int64()+tip.Int64()) - t.Logf("L1 fees are: baseFee(%d), tip(%d), blobBaseFee(%d). feeRatio: %f", baseFee, tip, blobFee, feeRatio) - return tip, baseFee, blobFee, feeRatio + feeRatio := float64(blobBaseFee.Int64()) / float64(baseFee.Int64()+tip.Int64()) + t.Logf("L1 fees are: baseFee(%d), tip(%d), blobBaseFee(%d). 
feeRatio: %f", baseFee, tip, blobBaseFee, feeRatio) + return tip, baseFee, blobBaseFee, feeRatio } requireEventualBatcherTxType := func(txType uint8, timeout time.Duration, strict bool) { var foundOtherTxType bool diff --git a/op-geth b/op-geth index 4b18713279247..3402175277067 160000 --- a/op-geth +++ b/op-geth @@ -1 +1 @@ -Subproject commit 4b1871327924788c9d8aba64d4e0e64e7d01726e +Subproject commit 3402175277067607b2aa72448d723d4aec1d9f6a diff --git a/op-node/rollup/attributes/engine_consolidate_test.go b/op-node/rollup/attributes/engine_consolidate_test.go index 7c57ef3cb6dc9..cc5c0c6522b77 100644 --- a/op-node/rollup/attributes/engine_consolidate_test.go +++ b/op-node/rollup/attributes/engine_consolidate_test.go @@ -214,7 +214,8 @@ func createMismatchedFeeRecipient() matchArgs { func createMismatchedEIP1559Params() matchArgs { args := holoceneArgs() - args.attrs.EIP1559Params[0]++ // so denominator is != 0 + // Create valid but mismatched EIP-1559 params (both denominator and elasticity must be non-zero or both zero) + copy((*args.attrs.EIP1559Params)[:], eip1559.EncodeHolocene1559Params(999, 999)) return args } @@ -575,7 +576,7 @@ func TestCheckEIP1559ParamsMatch(t *testing.T) { desc: "err-both-zero", attrParams: new(eth.Bytes8), blockExtraData: make(eth.BytesMax32, 9), - err: "eip1559 parameters do not match, attributes: 250, 6 (translated from 0,0), block: 0, 0", + err: "invalid block extraData: holocene extraData must encode a non-zero denominator", }, { desc: "err-invalid-params", @@ -609,7 +610,8 @@ func TestCheckEIP1559ParamsMatch(t *testing.T) { HoloceneTime: &pastTime, IsthmusTime: &pastTime, JovianTime: &futureTime, - ChainOpConfig: defaultOpConfig} + ChainOpConfig: defaultOpConfig, + } err := checkExtraDataParamsMatch(cfg, uint64(2), test.attrParams, nil, test.blockExtraData) if test.err == "" { require.NoError(t, err) diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index 9bfee7cc66ce9..5daa33238cb64 100644 --- 
a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -302,7 +302,8 @@ func checkSpanBatchPrefix(ctx context.Context, cfg *rollup.Config, log log.Logge return BatchAccept, parentBlock } -// checkSpanBatch performs the full SpanBatch validation rules. +// checkSpanBatch checks the full SpanBatch semantic validation rules on a syntactically-correct +// span batch. func checkSpanBatch(ctx context.Context, cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l2SafeHead eth.L2BlockRef, batch *SpanBatch, l1InclusionBlock eth.L1BlockRef, l2Fetcher SafeBlockFetcher, ) BatchValidity { diff --git a/op-node/rollup/derive/channel_compressor.go b/op-node/rollup/derive/channel_compressor.go index 341dd13d825ef..f85e1eb2b664c 100644 --- a/op-node/rollup/derive/channel_compressor.go +++ b/op-node/rollup/derive/channel_compressor.go @@ -19,6 +19,8 @@ type ChannelCompressor interface { Close() error Reset() Len() int + // StaticBytesLen returns the number of header bytes written to the buffer at construction time + StaticBytesLen() int Read([]byte) (int, error) GetCompressed() *bytes.Buffer } @@ -56,6 +58,10 @@ func (zc *ZlibCompressor) Reset() { zc.CompressorWriter.Reset(zc.compressed) } +func (bc *ZlibCompressor) StaticBytesLen() int { + return 0 +} + type BrotliCompressor struct { BaseChannelCompressor } @@ -66,6 +72,10 @@ func (bc *BrotliCompressor) Reset() { bc.CompressorWriter.Reset(bc.compressed) } +func (bc *BrotliCompressor) StaticBytesLen() int { + return 1 +} + func NewChannelCompressor(algo CompressionAlgo) (ChannelCompressor, error) { compressed := &bytes.Buffer{} if algo == Zlib { diff --git a/op-node/rollup/derive/system_config.go b/op-node/rollup/derive/system_config.go index 773a1c6f51772..8e145d144ffae 100644 --- a/op-node/rollup/derive/system_config.go +++ b/op-node/rollup/derive/system_config.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" 
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/hashicorp/go-multierror" @@ -33,8 +34,9 @@ var ( ) var ( - ErrUnknownEventVersion = errors.New("unknown SystemConfig event version") - ErrUnknownEventType = errors.New("unknown SystemConfig event type") + ErrUnknownEventVersion = errors.New("unknown SystemConfig event version") + ErrUnknownEventType = errors.New("unknown SystemConfig event type") + ErrInvalidEIP1559Params = errors.New("invalid EIP-1559 parameters") ) // UpdateSystemConfigWithL1Receipts filters all L1 receipts to find config updates and applies the config updates to the given sysCfg @@ -234,6 +236,10 @@ func parseSystemConfigUpdateEIP1559Params(data []byte) (eth.Bytes32, error) { if !solabi.EmptyReader(reader) { return eth.Bytes32{}, fmt.Errorf("%w: too many bytes", ErrParsingSystemConfig) } + // Validate the EIP-1559 params (last 8 bytes of the 32-byte value) + if err := eip1559.ValidateHolocene1559Params(params[24:32]); err != nil { + return eth.Bytes32{}, fmt.Errorf("%w: %w", ErrInvalidEIP1559Params, err) + } return params, nil } diff --git a/op-node/rollup/derive/system_config_test.go b/op-node/rollup/derive/system_config_test.go index 8317587543a8e..f4c3f2c18b3e8 100644 --- a/op-node/rollup/derive/system_config_test.go +++ b/op-node/rollup/derive/system_config_test.go @@ -214,6 +214,74 @@ func TestProcessSystemConfigUpdateLogEvent(t *testing.T) { }, err: false, }, + { + name: "EIP1559Params_ZeroDenominatorNonZeroElasticity", + log: &types.Log{ + Topics: []common.Hash{ + ConfigUpdateEventABIHash, + ConfigUpdateEventVersion0, + SystemConfigUpdateEIP1559Params, + }, + }, + hook: func(t *testing.T, log *types.Log) *types.Log { + // denominator = 0, elasticity = 1 (invalid combination) + params := []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1} + numberData, err := oneUint256.Pack(new(big.Int).SetBytes(params)) + require.NoError(t, err) + data, err := bytesArgs.Pack(numberData) + require.NoError(t, err) 
+ log.Data = data + return log + }, + config: eth.SystemConfig{}, + err: true, + }, + { + name: "EIP1559Params_NonZeroDenominatorZeroElasticity", + log: &types.Log{ + Topics: []common.Hash{ + ConfigUpdateEventABIHash, + ConfigUpdateEventVersion0, + SystemConfigUpdateEIP1559Params, + }, + }, + hook: func(t *testing.T, log *types.Log) *types.Log { + // denominator = 1, elasticity = 0 (invalid combination) + params := []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0} + numberData, err := oneUint256.Pack(new(big.Int).SetBytes(params)) + require.NoError(t, err) + data, err := bytesArgs.Pack(numberData) + require.NoError(t, err) + log.Data = data + return log + }, + config: eth.SystemConfig{}, + err: true, + }, + { + name: "EIP1559Params_BothZero", + log: &types.Log{ + Topics: []common.Hash{ + ConfigUpdateEventABIHash, + ConfigUpdateEventVersion0, + SystemConfigUpdateEIP1559Params, + }, + }, + hook: func(t *testing.T, log *types.Log) *types.Log { + // denominator = 0, elasticity = 0 (valid - uses pre-Holocene constants) + params := []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0} + numberData, err := oneUint256.Pack(new(big.Int).SetBytes(params)) + require.NoError(t, err) + data, err := bytesArgs.Pack(numberData) + require.NoError(t, err) + log.Data = data + return log + }, + config: eth.SystemConfig{ + EIP1559Params: eth.Bytes8{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + }, + err: false, + }, { name: "OperatorFeeParams", log: &types.Log{ diff --git a/op-program/client/l2/fast_canon.go b/op-program/client/l2/fast_canon.go index dc4973af99a33..82c030d040e13 100644 --- a/op-program/client/l2/fast_canon.go +++ b/op-program/client/l2/fast_canon.go @@ -159,6 +159,8 @@ type chainContext struct { config *params.ChainConfig } +var _ core.ChainContext = (*chainContext)(nil) + func (c *chainContext) Engine() consensus.Engine { return c.engine } @@ -171,3 +173,15 @@ func (c *chainContext) GetHeader(hash common.Hash, number uint64) *types.Header // The EVM should never call this method 
during eip-2935 historical block retrieval panic("unexpected call to GetHeader") } + +func (c *chainContext) CurrentHeader() *types.Header { + panic("unimplemented") +} + +func (c *chainContext) GetHeaderByHash(hash common.Hash) *types.Header { + panic("unimplemented") +} + +func (c *chainContext) GetHeaderByNumber(number uint64) *types.Header { + panic("unimplemented") +} diff --git a/op-program/client/mpt/db.go b/op-program/client/mpt/db.go index 265d591e3a93b..41c749b09c1fe 100644 --- a/op-program/client/mpt/db.go +++ b/op-program/client/mpt/db.go @@ -78,6 +78,10 @@ func (p *DB) Ancients() (uint64, error) { panic("not supported") } +func (p *DB) AncientBytes(kind string, id uint64, offset uint64, length uint64) ([]byte, error) { + panic("not supported") +} + func (p *DB) Tail() (uint64, error) { panic("not supported") } diff --git a/op-service/apis/sync_tester.go b/op-service/apis/sync_tester.go index 7e345cb2df881..d8b8674c43554 100644 --- a/op-service/apis/sync_tester.go +++ b/op-service/apis/sync_tester.go @@ -46,4 +46,6 @@ type EngineAPI interface { NewPayloadV2(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) NewPayloadV3(ctx context.Context, payload *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash) (*eth.PayloadStatusV1, error) NewPayloadV4(ctx context.Context, payload *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (*eth.PayloadStatusV1, error) + + ExchangeCapabilities(ctx context.Context, _ []string) []string } diff --git a/op-service/bgpo/oracle.go b/op-service/bgpo/oracle.go new file mode 100644 index 0000000000000..99b1a2d5f4bd7 --- /dev/null +++ b/op-service/bgpo/oracle.go @@ -0,0 +1,407 @@ +package bgpo + +import ( + "context" + "fmt" + "math/big" + "sort" + "sync" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" + 
"github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/sources/caching" +) + +// BlobTipOracle tracks blob base gas prices by subscribing to new block headers +// and extracts the blob tip caps from blob txs from each block. +type BlobTipOracle struct { + sync.Mutex + + client *client.PollingClient + chainConfig *params.ChainConfig + log log.Logger + config *BlobTipOracleConfig + + // LRU cache for blob base fees by block number + baseFees *caching.LRUCache[uint64, *big.Int] + + // Cache for blob txs priority fees extracted from blocks (for SuggestBlobTipCap) + priorityFees *caching.LRUCache[uint64, []*big.Int] + + // Track the latest block number for GetLatestBlobBaseFee + latestBlock uint64 + + ctx context.Context + cancel context.CancelFunc + + sub ethereum.Subscription + + cachePopulated chan struct{} +} + +// rpcBlock structure for fetching blocks with transactions. +// When eth_getBlockByNumber is called with true, it returns full transaction objects. +type rpcBlock struct { + Number hexutil.Uint64 `json:"number"` + Hash hexutil.Bytes `json:"hash"` + Transactions []*types.Transaction `json:"transactions"` +} + +// BlobTipOracleConfig configures the blob tip oracle. 
+type BlobTipOracleConfig struct { + // NetworkTimeout is the timeout for network requests + NetworkTimeout time.Duration + // PricesCacheSize is the maximum number of blob base fees to cache + PricesCacheSize int + // BlockCacheSize is the maximum number of blocks to cache for RPC calls + BlockCacheSize int + // MaxBlocks is the default number of recent blocks to analyze in SuggestBlobTipCap + MaxBlocks int + // Percentile is the default percentile to use for blob tip cap suggestion + Percentile int + // Poll rate is the rate at which the oracle will poll for new blocks + PollRate time.Duration + // Metrics for cache tracking + Metrics caching.Metrics + // DefaultPriorityFee is the default priority fee to use for blob tip cap suggestion, if there are no recent blob txs + DefaultPriorityFee *big.Int +} + +// DefaultBlobTipOracleConfig returns a default configuration. +func DefaultBlobTipOracleConfig() *BlobTipOracleConfig { + return &BlobTipOracleConfig{ + PricesCacheSize: 1000, + BlockCacheSize: 100, + MaxBlocks: 20, + Percentile: 60, + PollRate: 2500 * time.Millisecond, + NetworkTimeout: 3 * time.Second, + Metrics: nil, + DefaultPriorityFee: big.NewInt(1), // 1 wei + } +} + +// NewBlobTipOracle creates a new blob tip oracle that will subscribe +// to newHeads and track blob base fees, and extract blob tip caps from blob txs. 
+func NewBlobTipOracle(ctx context.Context, rpcClient client.RPC, chainConfig *params.ChainConfig, log log.Logger, config *BlobTipOracleConfig) *BlobTipOracle { + defaultConfig := DefaultBlobTipOracleConfig() + if config == nil { + config = defaultConfig + } + if config.PricesCacheSize <= 0 { + config.PricesCacheSize = defaultConfig.PricesCacheSize + } + if config.BlockCacheSize <= 0 { + config.BlockCacheSize = defaultConfig.BlockCacheSize + } + if config.MaxBlocks <= 0 { + config.MaxBlocks = defaultConfig.MaxBlocks + } + if config.Percentile <= 0 || config.Percentile > 100 { + config.Percentile = defaultConfig.Percentile + } + + logger := log.With("module", "bgpo") + + pollClient := client.NewPollingClient(ctx, logger, rpcClient, client.WithPollRate(config.PollRate)) + + oracleCtx, cancel := context.WithCancel(ctx) + return &BlobTipOracle{ + config: config, + client: pollClient, + chainConfig: chainConfig, + log: log.With("module", "bgpo"), + baseFees: caching.NewLRUCache[uint64, *big.Int](config.Metrics, "bgpo_prices", config.PricesCacheSize), + priorityFees: caching.NewLRUCache[uint64, []*big.Int](config.Metrics, "bgpo_tips", config.BlockCacheSize), + ctx: oracleCtx, + cancel: cancel, + cachePopulated: make(chan struct{}), + } +} + +// WaitCachePopulated waits for the cache to be populated. +func (o *BlobTipOracle) WaitCachePopulated() { + select { + case <-o.cachePopulated: + o.log.Info("Done waiting for cache pre-population") + return + case <-o.ctx.Done(): + o.log.Error("Cache pre-population timed out", "ctx", o.ctx.Err()) + return + case <-time.After(o.config.NetworkTimeout * time.Duration(o.config.MaxBlocks)): + o.log.Error("Cache pre-population timed out after timeout", "timeout", o.config.NetworkTimeout, "maxBlocks", o.config.MaxBlocks) + return + } +} + +// Start begins subscribing to newHeads and processing headers. +// Before subscribing, it pre-populates the cache with the last MaxBlocks blocks. 
+// This method blocks until the context is canceled or an error occurs. +func (o *BlobTipOracle) Start() error { + // Pre-populate cache with recent blocks before subscribing + if err := o.prePopulateCache(); err != nil { + o.log.Warn("Failed to pre-populate cache, continuing anyway", "err", err) + } + + headers := make(chan *types.Header, 10) + + doSubscribe := func(ch chan<- *types.Header) (ethereum.Subscription, error) { + return o.client.Subscribe(o.ctx, "eth", ch, "newHeads") + } + + sub, err := doSubscribe(headers) + if err != nil { + return err + } + o.sub = sub + + o.log.Info("Blob tip oracle started, subscribed to newHeads") + + // Process headers as they arrive + for { + select { + case header := <-headers: + if err := o.processHeader(header); err != nil { + o.log.Error("Error processing header", "err", err, "block", header.Number.Uint64()) + } + case err := <-sub.Err(): + if err != nil { + o.log.Error("Subscription error", "err", err) + return err + } + return nil + case <-o.ctx.Done(): + o.log.Info("Blob tip oracle context canceled") + return nil + } + } +} + +// prePopulateCache fetches and processes the last MaxBlocks blocks to pre-populate the cache. 
+func (o *BlobTipOracle) prePopulateCache() error { + defer close(o.cachePopulated) // signal that the cache is populated and we can start using the oracle + now := time.Now() + + ctx, cancel := context.WithTimeout(o.ctx, o.config.NetworkTimeout) + defer cancel() + + // Get the latest block number + var latestBlockNum hexutil.Uint64 + if err := o.client.CallContext(ctx, &latestBlockNum, "eth_blockNumber"); err != nil { + return fmt.Errorf("failed to get latest block number: %w", err) + } + + latest := uint64(latestBlockNum) + var startBlock uint64 + if latest >= uint64(o.config.MaxBlocks) { + startBlock = latest - uint64(o.config.MaxBlocks) + 1 + } else { + startBlock = 0 + } + + o.log.Debug("Pre-populating cache", "from", startBlock, "to", latest, "blocks", latest-startBlock+1) + + // Fetch and process each block + for blockNum := startBlock; blockNum <= latest; blockNum++ { + // Fetch header + var header *types.Header + blockNumHex := hexutil.EncodeUint64(blockNum) + if err := o.client.CallContext(ctx, &header, "eth_getBlockByNumber", blockNumHex, false); err != nil { + o.log.Debug("Failed to fetch header for pre-population", "block", blockNum, "err", err) + continue + } + + // Process header (this will also trigger blob fee cap fetching) + if err := o.processHeader(header); err != nil { + o.log.Debug("Failed to process header for pre-population", "block", blockNum, "err", err) + continue + } + } + + o.log.Info("Cache pre-population complete", "blocks_processed", latest-startBlock+1, "took", time.Since(now)) + return nil +} + +// processHeader calculates and stores the blob base fee for the given header. +// It also triggers an asynchronous fetch of the full block to extract blob fee caps. 
+func (o *BlobTipOracle) processHeader(header *types.Header) error { + defer func(start time.Time) { + o.log.Debug("Processed header", "block", header.Number.Uint64(), "time", time.Since(start)) + }(time.Now()) + + o.Lock() + defer o.Unlock() + + blockNum := header.Number.Uint64() + + // Calculate blob base fee from the header + if _, ok := o.baseFees.Get(blockNum); ok { + o.log.Debug("Skipping blob base fee calculation, already processed", "block", blockNum, "latestBlock", o.latestBlock) + } else { + var blobBaseFee *big.Int + if header.ExcessBlobGas != nil { + blobBaseFee = eip4844.CalcBlobFee(o.chainConfig, header) + } + + if blobBaseFee != nil { + o.log.Debug("Adding blob base fee", "block", blockNum, "blobBaseFee", blobBaseFee.String()) + o.baseFees.Add(blockNum, blobBaseFee) + } else { + o.log.Debug("Block does not support blob transactions", "block", blockNum) + o.baseFees.Add(blockNum, big.NewInt(0)) + } + } + + // Fetch full block data and extract blob fee caps + o.fetchBlockBlobFeeCaps(blockNum, header.BaseFee) + + if blockNum > o.latestBlock { + o.latestBlock = blockNum + } + + return nil +} + +// fetchBlockBlobFeeCaps fetches a block and extracts blob fee caps, storing them in cache. 
+func (o *BlobTipOracle) fetchBlockBlobFeeCaps(blockNum uint64, baseFee *big.Int) { + // Check if we already have the blob fee caps cached + if _, ok := o.priorityFees.Get(blockNum); ok { + o.log.Debug("Skipping blob fee caps fetch, already processed", "block", blockNum) + return + } + + ctx, cancel := context.WithTimeout(o.ctx, o.config.NetworkTimeout) + defer cancel() + + // Fetch the block + var block rpcBlock + blockNumHex := hexutil.EncodeUint64(blockNum) + if err := o.client.CallContext(ctx, &block, "eth_getBlockByNumber", blockNumHex, true); err != nil { + o.log.Warn("Failed to fetch block for blob fee caps", "block", blockNum, "err", err) + return + } + + // Extract blob fee caps directly + tips := o.extractTipsForBlobTxs(block, baseFee) + + // Store in cache (even if empty, to avoid repeated fetches) + o.priorityFees.Add(blockNum, tips) +} + +// GetLatestBlobBaseFee returns the blob base fee for the most recently processed block. +// Returns (0, nil) if no blocks have been processed yet, the price was evicted from cache, +// or if the latest block doesn't support blob transactions. +func (o *BlobTipOracle) GetLatestBlobBaseFee() (uint64, *big.Int) { + o.Lock() + defer o.Unlock() + + if o.latestBlock == 0 { + return 0, nil + } + + price, ok := o.baseFees.Get(o.latestBlock) + if !ok { + // Price was evicted from cache or block was never processed + return 0, nil + } + if price == nil { + // Block doesn't contain blob transactions + return o.latestBlock, nil + } + // Return a copy to prevent external modification + return o.latestBlock, new(big.Int).Set(price) +} + +// SuggestBlobTipCap analyzes recent blocks to suggest an appropriate blob tip cap +// for blob transactions. It examines the last maxBlocks blocks and returns the +// percentile-th percentile of blob tip caps from blob transactions. +// This is similar to go-ethereum's oracle.SuggestTipCap but for tips solely on blob transactions (type 3). 
+// +// This method only reads from cache and does not make any RPC calls. Block data +// is fetched during block processing. +// +// If no blob transactions are found in recent blocks, it returns the current blob base fee +// plus a small buffer to ensure the transaction is competitive. +func (o *BlobTipOracle) SuggestBlobTipCap(ctx context.Context, maxBlocks int, percentile int) (*big.Int, error) { + if maxBlocks <= 0 { + maxBlocks = o.config.MaxBlocks + } + if percentile <= 0 || percentile > 100 { + percentile = o.config.Percentile + } + + // Get the latest block number from our tracked state (no RPC call) + o.Lock() + latestBlockNum := o.latestBlock + o.Unlock() + + if latestBlockNum == 0 { + return nil, fmt.Errorf("no blocks have been processed yet") + } + + // Collect blob fee caps from recent blocks (only from cache, no RPC calls) + var tips []*big.Int + startBlock := latestBlockNum + if startBlock >= uint64(maxBlocks) { + startBlock -= uint64(maxBlocks) + } else { + startBlock = 0 + } + + for blockNum := startBlock; blockNum <= latestBlockNum; blockNum++ { + // Only read from cache - no RPC calls + if t, ok := o.priorityFees.Get(blockNum); ok { + tips = append(tips, t...) 
+ } + } + + // If we found blob transactions, calculate percentile + if len(tips) > 0 { + sort.Slice(tips, func(i, j int) bool { + return tips[i].Cmp(tips[j]) < 0 + }) + idx := (len(tips) - 1) * percentile / 100 + suggested := new(big.Int).Set(tips[idx]) + o.log.Debug("Suggested blob tip cap from recent transactions", "suggested", suggested.String(), "samples", len(tips), "percentile", percentile) + return suggested, nil + } + + // No blob transactions found, use the default priority fee - that should almost never happen, so we warn about it + o.log.Warn("No recent blob transactions found, using blob base fee + buffer", "block", latestBlockNum, "default_priority_fee", o.config.DefaultPriorityFee.String()) + return new(big.Int).Set(o.config.DefaultPriorityFee), nil +} + +// extractTipsForBlobTxs extracts tips for blob transactions from a block's transactions. +func (o *BlobTipOracle) extractTipsForBlobTxs(block rpcBlock, baseFee *big.Int) []*big.Int { + var tips []*big.Int + for _, tx := range block.Transactions { + // Check if it's a blob transaction (type 3) and has blob fee cap + if tx.Type() == types.BlobTxType { + tip, err := tx.EffectiveGasTip(baseFee) // tip calculated from execution gas, for a type 3 transaction + if err != nil { + o.log.Error("Failed to calculate effective gas tip", "block", uint64(block.Number), "err", err) + continue + } + + tips = append(tips, tip) + o.log.Debug("Extracted tip from blob tx", "block", uint64(block.Number), "tip", tip.String()) + } + } + return tips +} + +// Close stops the oracle and cleans up resources. 
+func (o *BlobTipOracle) Close() { + o.cancel() + if o.sub != nil { + o.sub.Unsubscribe() + } + o.log.Info("Blob tip oracle closed") +} diff --git a/op-service/bgpo/oracle_test.go b/op-service/bgpo/oracle_test.go new file mode 100644 index 0000000000000..d281c92f706fe --- /dev/null +++ b/op-service/bgpo/oracle_test.go @@ -0,0 +1,485 @@ +package bgpo + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" + "github.com/holiman/uint256" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/testlog" +) + +type mockRPC struct { + mock.Mock +} + +func (m *mockRPC) CallContext(ctx context.Context, result any, method string, args ...any) error { + callArgs := make([]any, 0, len(args)) + callArgs = append(callArgs, args...) 
+ args_ := m.Called(ctx, result, method, callArgs) + return args_.Error(0) +} + +func (m *mockRPC) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { + args_ := m.Called(ctx, b) + return args_.Error(0) +} + +func (m *mockRPC) Subscribe(ctx context.Context, namespace string, channel any, args ...any) (ethereum.Subscription, error) { + args_ := m.Called(ctx, namespace, channel, args) + sub := args_.Get(0) + if sub == nil { + return nil, args_.Error(1) + } + return sub.(ethereum.Subscription), args_.Error(1) +} + +func (m *mockRPC) Close() { + m.Called() +} + +var _ client.RPC = (*mockRPC)(nil) + +func createHeader(blockNum uint64, excessBlobGas *uint64) *types.Header { + header := &types.Header{ + Number: big.NewInt(int64(blockNum)), + ParentHash: common.Hash{}, + Time: uint64(time.Now().Unix()), + BaseFee: big.NewInt(1000000000), // 1 gwei + } + if excessBlobGas != nil { + header.ExcessBlobGas = excessBlobGas + } + return header +} + +func createBlobTx(gasTip *big.Int, gasFeeCap *big.Int, blobFeeCap *big.Int) *types.Transaction { + // Create a minimal blob transaction + // Note: This is a simplified version for testing + tx := types.NewTx(&types.BlobTx{ + ChainID: uint256.NewInt(1), + Nonce: 0, + GasTipCap: uint256.MustFromBig(gasTip), + GasFeeCap: uint256.MustFromBig(gasFeeCap), + Gas: 21000, + To: common.Address{}, + Value: uint256.NewInt(0), + Data: []byte{}, + BlobFeeCap: uint256.MustFromBig(blobFeeCap), + BlobHashes: []common.Hash{common.Hash{}}, + }) + return tx +} + +func TestNewBlobGasPriceOracle(t *testing.T) { + ctx := context.Background() + mrpc := new(mockRPC) + chainConfig := params.MainnetChainConfig + logger := testlog.Logger(t, log.LevelInfo) + + t.Run("with nil config", func(t *testing.T) { + oracle := NewBlobTipOracle(ctx, mrpc, chainConfig, logger, nil) + require.NotNil(t, oracle) + require.Equal(t, 20, oracle.config.MaxBlocks) + require.Equal(t, 60, oracle.config.Percentile) + }) + + t.Run("with custom config", func(t *testing.T) { 
+ config := &BlobTipOracleConfig{ + PricesCacheSize: 500, + BlockCacheSize: 50, + MaxBlocks: 10, + Percentile: 70, + } + oracle := NewBlobTipOracle(ctx, mrpc, chainConfig, logger, config) + require.NotNil(t, oracle) + require.Equal(t, 10, oracle.config.MaxBlocks) + require.Equal(t, 70, oracle.config.Percentile) + }) + + t.Run("with invalid config values", func(t *testing.T) { + config := &BlobTipOracleConfig{ + PricesCacheSize: -1, + BlockCacheSize: -1, + MaxBlocks: -1, + Percentile: 150, // Invalid + } + oracle := NewBlobTipOracle(ctx, mrpc, chainConfig, logger, config) + require.NotNil(t, oracle) + // Should use defaults + require.Equal(t, 20, oracle.config.MaxBlocks) + require.Equal(t, 60, oracle.config.Percentile) + }) +} + +func TestProcessHeader(t *testing.T) { + ctx := context.Background() + mrpc := new(mockRPC) + chainConfig := params.MainnetChainConfig + logger := testlog.Logger(t, log.LevelError) + + oracle := NewBlobTipOracle(ctx, mrpc, chainConfig, logger, &BlobTipOracleConfig{ + PricesCacheSize: 10, + BlockCacheSize: 10, + MaxBlocks: 5, + Percentile: 60, + }) + + t.Run("process header with excess blob gas", func(t *testing.T) { + excessBlobGas := uint64(1000000) + header := createHeader(100, &excessBlobGas) + + // Mock block fetch for blob fee caps + mrpc.On("CallContext", mock.Anything, mock.Anything, "eth_getBlockByNumber", mock.MatchedBy(func(args []any) bool { + return len(args) == 2 && args[1] == true + })). + Run(func(args mock.Arguments) { + block := args[1].(*rpcBlock) + block.Number = hexutil.Uint64(100) + block.Hash = common.Hash{}.Bytes() + block.Transactions = []*types.Transaction{} + }). 
+ Return(nil).Once() + + err := oracle.processHeader(header) + require.NoError(t, err) + + // Check latest block + latestBlock, latestFee := oracle.GetLatestBlobBaseFee() + require.Equal(t, uint64(100), latestBlock) + require.NotNil(t, latestFee) + }) + + t.Run("process header without excess blob gas", func(t *testing.T) { + header := createHeader(101, nil) + + // Mock block fetch + mrpc.On("CallContext", mock.Anything, mock.Anything, "eth_getBlockByNumber", mock.MatchedBy(func(args []any) bool { + return len(args) == 2 && args[1] == true + })). + Run(func(args mock.Arguments) { + block := args[1].(*rpcBlock) + block.Number = hexutil.Uint64(101) + block.Hash = common.Hash{}.Bytes() + block.Transactions = []*types.Transaction{} + }). + Return(nil).Once() + + err := oracle.processHeader(header) + require.NoError(t, err) + + // Latest block should be updated + latestBlock, _ := oracle.GetLatestBlobBaseFee() + require.Equal(t, uint64(101), latestBlock) + }) + + mrpc.AssertExpectations(t) +} + +func TestGetLatestBlobBaseFee(t *testing.T) { + ctx := context.Background() + mrpc := new(mockRPC) + chainConfig := params.MainnetChainConfig + logger := testlog.Logger(t, log.LevelError) + + oracle := NewBlobTipOracle(ctx, mrpc, chainConfig, logger, &BlobTipOracleConfig{ + PricesCacheSize: 10, + BlockCacheSize: 10, + }) + + t.Run("no blocks processed", func(t *testing.T) { + block, fee := oracle.GetLatestBlobBaseFee() + require.Equal(t, uint64(0), block) + require.Nil(t, fee) + }) + + t.Run("with processed blocks", func(t *testing.T) { + excessBlobGas := uint64(1000000) + header1 := createHeader(300, &excessBlobGas) + header2 := createHeader(301, &excessBlobGas) + + mrpc.On("CallContext", mock.Anything, mock.Anything, "eth_getBlockByNumber", mock.MatchedBy(func(args []any) bool { + return len(args) == 2 && args[1] == true + })). + Return(nil).Twice(). 
+ Run(func(args mock.Arguments) { + block := args[1].(*rpcBlock) + callArgs := args[3].([]any) + blockNumHex := callArgs[0].(string) + if blockNumHex == "0x12c" { // 300 + block.Number = hexutil.Uint64(300) + } else { + block.Number = hexutil.Uint64(301) + } + block.Hash = common.Hash{}.Bytes() + block.Transactions = []*types.Transaction{} + }) + + err := oracle.processHeader(header1) + require.NoError(t, err) + + err = oracle.processHeader(header2) + require.NoError(t, err) + + block, fee := oracle.GetLatestBlobBaseFee() + require.Equal(t, uint64(301), block) + require.NotNil(t, fee) + }) + + mrpc.AssertExpectations(t) +} + +func TestSuggestBlobTipCap(t *testing.T) { + ctx := context.Background() + mrpc := new(mockRPC) + chainConfig := params.MainnetChainConfig + logger := testlog.Logger(t, log.LevelError) + + oracle := NewBlobTipOracle(ctx, mrpc, chainConfig, logger, &BlobTipOracleConfig{ + PricesCacheSize: 10, + BlockCacheSize: 10, + MaxBlocks: 5, + Percentile: 60, + }) + + t.Run("no blocks processed", func(t *testing.T) { + suggested, err := oracle.SuggestBlobTipCap(ctx, 0, 0) + require.Error(t, err) + require.Nil(t, suggested) + require.Contains(t, err.Error(), "no blocks have been processed") + }) + + t.Run("with_blob_transactions", func(t *testing.T) { + // Process blocks with blob transactions + excessBlobGas := uint64(1000000) + for i := uint64(400); i <= 404; i++ { + header := createHeader(i, &excessBlobGas) + + // Create blob transactions with different tip + gasFeeCap := big.NewInt(3000000000) + blobFeeCap := big.NewInt(3000000000) + tip := big.NewInt(int64((i-400)*1000000 + 1000000)) // 1M, 2M, 3M, 4M, 5M + blobTx := createBlobTx(tip, gasFeeCap, blobFeeCap) + + mrpc.On("CallContext", mock.Anything, mock.Anything, "eth_getBlockByNumber", mock.MatchedBy(func(args []any) bool { + return len(args) == 2 && args[1] == true + })). 
+ Run(func(args mock.Arguments) { + block := args[1].(*rpcBlock) + block.Number = hexutil.Uint64(i) + block.Hash = common.Hash{}.Bytes() + block.Transactions = []*types.Transaction{blobTx} + }). + Return(nil).Once() + + err := oracle.processHeader(header) + require.NoError(t, err) + } + + // Test with default parameters + suggested, err := oracle.SuggestBlobTipCap(ctx, 0, 0) + require.NoError(t, err) + require.NotNil(t, suggested) + // Should be 60th percentile of [1M, 2M, 3M, 4M, 5M] = 3M (index 2 of 4) + require.Equal(t, big.NewInt(3000000), suggested) + + // Test with custom percentile + suggested, err = oracle.SuggestBlobTipCap(ctx, 5, 80) + require.NoError(t, err) + require.NotNil(t, suggested) + // 80th percentile of [1M, 2M, 3M, 4M, 5M] = 4M (index 3 of 4) + require.Equal(t, big.NewInt(4000000), suggested) + }) + + t.Run("no blob transactions, fallback to base fee", func(t *testing.T) { + oracle2 := NewBlobTipOracle(ctx, mrpc, chainConfig, logger, &BlobTipOracleConfig{ + PricesCacheSize: 10, + BlockCacheSize: 10, + MaxBlocks: 5, + Percentile: 60, + DefaultPriorityFee: big.NewInt(101), + }) + + excessBlobGas := uint64(1000000) + header := createHeader(500, &excessBlobGas) + + mrpc.On("CallContext", mock.Anything, mock.Anything, "eth_getBlockByNumber", mock.MatchedBy(func(args []any) bool { + return len(args) == 2 && args[1] == true + })). + Run(func(args mock.Arguments) { + block := args[1].(*rpcBlock) + block.Number = hexutil.Uint64(500) + block.Hash = common.Hash{}.Bytes() + block.Transactions = []*types.Transaction{} // No blob transactions + }). 
+ Return(nil).Once() + + err := oracle2.processHeader(header) + require.NoError(t, err) + + suggested, err := oracle2.SuggestBlobTipCap(ctx, 0, 0) + require.NoError(t, err) + require.Equal(t, big.NewInt(101), suggested) + }) + + mrpc.AssertExpectations(t) +} + +func TestPrePopulateCache(t *testing.T) { + ctx := context.Background() + mrpc := new(mockRPC) + chainConfig := params.MainnetChainConfig + logger := testlog.Logger(t, log.LevelError) + + oracle := NewBlobTipOracle(ctx, mrpc, chainConfig, logger, &BlobTipOracleConfig{ + PricesCacheSize: 10, + BlockCacheSize: 10, + MaxBlocks: 3, + Percentile: 60, + }) + + t.Run("pre-populate with recent blocks", func(t *testing.T) { + latestBlock := uint64(1000) + + // Mock eth_blockNumber (called with no args - empty slice) + mrpc.On("CallContext", mock.Anything, mock.Anything, "eth_blockNumber", mock.MatchedBy(func(args []any) bool { + return len(args) == 0 + })). + Run(func(args mock.Arguments) { + result := args[1].(*hexutil.Uint64) + *result = hexutil.Uint64(latestBlock) + }). + Return(nil).Once() + + // Mock header fetches for blocks 998, 999, 1000 + excessBlobGas := uint64(1000000) + for i := uint64(998); i <= 1000; i++ { + header := createHeader(i, &excessBlobGas) + + // Mock header fetch (with false for full transactions) + mrpc.On("CallContext", mock.Anything, mock.Anything, "eth_getBlockByNumber", mock.MatchedBy(func(args []any) bool { + return len(args) == 2 && args[0] == hexutil.EncodeUint64(i) && args[1] == false + })). + Run(func(args mock.Arguments) { + result := args[1].(**types.Header) + *result = header + }). + Return(nil).Once() + + // Mock block fetch for blob fee caps (with true for full transactions) + mrpc.On("CallContext", mock.Anything, mock.Anything, "eth_getBlockByNumber", mock.MatchedBy(func(args []any) bool { + return len(args) == 2 && args[0] == hexutil.EncodeUint64(i) && args[1] == true + })). 
+ Run(func(args mock.Arguments) { + block := args[1].(*rpcBlock) + block.Number = hexutil.Uint64(i) + block.Hash = common.Hash{}.Bytes() + block.Transactions = []*types.Transaction{} + }). + Return(nil).Once() + } + + err := oracle.prePopulateCache() + require.NoError(t, err) + + latestBlockNum, _ := oracle.GetLatestBlobBaseFee() + require.Equal(t, uint64(1000), latestBlockNum) + }) + + mrpc.AssertExpectations(t) +} + +func TestExtractBlobFeeCaps(t *testing.T) { + ctx := context.Background() + mrpc := new(mockRPC) + chainConfig := params.MainnetChainConfig + logger := testlog.Logger(t, log.LevelError) + + oracle := NewBlobTipOracle(ctx, mrpc, chainConfig, logger, &BlobTipOracleConfig{ + PricesCacheSize: 10, + BlockCacheSize: 10, + }) + + t.Run("extract_from_blob_transactions", func(t *testing.T) { + baseFee := big.NewInt(2) // 2 wei + blobFeeCap := big.NewInt(300) // 300 wei + gasFeeCap := big.NewInt(300) // 300 wei + block := rpcBlock{ + Number: hexutil.Uint64(600), + Hash: common.Hash{}.Bytes(), + Transactions: []*types.Transaction{ + createBlobTx(big.NewInt(7), gasFeeCap, blobFeeCap), + createBlobTx(big.NewInt(8), gasFeeCap, blobFeeCap), + createBlobTx(big.NewInt(9), gasFeeCap, blobFeeCap), + createBlobTx(big.NewInt(400), gasFeeCap, blobFeeCap), + }, + } + + tips := oracle.extractTipsForBlobTxs(block, baseFee) + require.Len(t, tips, 4) + require.Equal(t, big.NewInt(7), tips[0]) + require.Equal(t, big.NewInt(8), tips[1]) + require.Equal(t, big.NewInt(9), tips[2]) + require.Equal(t, big.NewInt(298), tips[3]) // gasFeeCap - baseFee; limited to gasFeeCap, even though the blob tip cap is 400 wei + }) + + t.Run("extract ignores non-blob transactions", func(t *testing.T) { + baseFee := big.NewInt(1000000) + block := rpcBlock{ + Number: hexutil.Uint64(601), + Hash: common.Hash{}.Bytes(), + Transactions: []*types.Transaction{ + types.NewTx(&types.LegacyTx{ + Nonce: 0, + GasPrice: big.NewInt(1000000), + Gas: 21000, + To: &common.Address{}, + Value: big.NewInt(0), + Data: 
[]byte{}, + }), + }, + } + + feeCaps := oracle.extractTipsForBlobTxs(block, baseFee) + require.Len(t, feeCaps, 0) + }) + + t.Run("extract_from_mixed_transactions", func(t *testing.T) { + baseFee := big.NewInt(1000000) + blobFeeCap := big.NewInt(3000000000) + gasFeeCap := big.NewInt(3000000000) + block := rpcBlock{ + Number: hexutil.Uint64(602), + Hash: common.Hash{}.Bytes(), + Transactions: []*types.Transaction{ + types.NewTx(&types.LegacyTx{ + Nonce: 0, + GasPrice: big.NewInt(1000000), + Gas: 21000, + To: &common.Address{}, + Value: big.NewInt(0), + Data: []byte{}, + }), + createBlobTx(big.NewInt(5000000), gasFeeCap, blobFeeCap), + createBlobTx(big.NewInt(6000000), gasFeeCap, blobFeeCap), + }, + } + + tips := oracle.extractTipsForBlobTxs(block, baseFee) + require.Len(t, tips, 2) + require.Equal(t, big.NewInt(5000000), tips[0]) + require.Equal(t, big.NewInt(6000000), tips[1]) + }) +} diff --git a/op-service/dial/dial.go b/op-service/dial/dial.go index e6ce72628ed2c..fa0d36f885841 100644 --- a/op-service/dial/dial.go +++ b/op-service/dial/dial.go @@ -65,6 +65,15 @@ func DialSupervisorClientWithTimeout(ctx context.Context, log log.Logger, url st return sources.NewSupervisorClient(rpcCl), nil } +func DialSuperNodeClientWithTimeout(ctx context.Context, log log.Logger, url string, callerOpts ...client.RPCOption) (*sources.SuperNodeClient, error) { + rpcCl, err := dialClientWithTimeout(ctx, log, url, callerOpts...) + if err != nil { + return nil, err + } + + return sources.NewSuperNodeClient(rpcCl), nil +} + // DialRPCClientWithTimeout attempts to dial the RPC provider using the provided URL. // The timeout and retry logic is handled internally by the client. 
func DialRPCClientWithTimeout(ctx context.Context, log log.Logger, url string, opts ...rpc.ClientOption) (*rpc.Client, error) { diff --git a/op-service/eth/super_root.go b/op-service/eth/super_root.go index 1b9601ccbd1f0..8334a37b310a6 100644 --- a/op-service/eth/super_root.go +++ b/op-service/eth/super_root.go @@ -78,6 +78,28 @@ func (o *SuperV1) Marshal() []byte { return buf } +func (o *SuperV1) MarshalJSON() ([]byte, error) { + return json.Marshal(&superV1JsonMarshalling{ + Timestamp: hexutil.Uint64(o.Timestamp), + Chains: o.Chains, + }) +} + +func (o *SuperV1) UnmarshalJSON(input []byte) error { + var dec superV1JsonMarshalling + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + o.Timestamp = uint64(dec.Timestamp) + o.Chains = dec.Chains + return nil +} + +type superV1JsonMarshalling struct { + Timestamp hexutil.Uint64 `json:"timestamp"` + Chains []ChainIDAndOutput `json:"chains"` +} + func UnmarshalSuperRoot(data []byte) (Super, error) { if len(data) < 1 { return nil, ErrInvalidSuperRoot diff --git a/op-service/eth/super_root_test.go b/op-service/eth/super_root_test.go index 316b94e63e2f9..f7968dceeba16 100644 --- a/op-service/eth/super_root_test.go +++ b/op-service/eth/super_root_test.go @@ -2,6 +2,7 @@ package eth import ( "encoding/binary" + "encoding/json" "testing" "github.com/stretchr/testify/require" @@ -77,6 +78,33 @@ func TestSuperRootV1Codec(t *testing.T) { }) } +func TestSuperRootV1JSON(t *testing.T) { + t.Run("UseHexForTimestamp", func(t *testing.T) { + chainA := ChainIDAndOutput{ChainID: ChainIDFromUInt64(11), Output: Bytes32{0x01}} + superRoot := NewSuperV1(7000, chainA) + jsonData, err := json.Marshal(superRoot) + require.NoError(t, err) + + values := make(map[string]any) + err = json.Unmarshal(jsonData, &values) + require.NoError(t, err) + require.Equal(t, "0x1b58", values["timestamp"]) + }) + + t.Run("RoundTrip", func(t *testing.T) { + chainA := ChainIDAndOutput{ChainID: ChainIDFromUInt64(11), Output: Bytes32{0x01}} + 
chainB := ChainIDAndOutput{ChainID: ChainIDFromUInt64(12), Output: Bytes32{0x02}} + chainC := ChainIDAndOutput{ChainID: ChainIDFromUInt64(13), Output: Bytes32{0x03}} + superRoot := NewSuperV1(7000, chainA, chainB, chainC) + data, err := json.Marshal(superRoot) + require.NoError(t, err) + var actual SuperV1 + err = json.Unmarshal(data, &actual) + require.NoError(t, err) + require.Equal(t, superRoot, &actual) + }) +} + func TestResponseToSuper(t *testing.T) { t.Run("SingleChain", func(t *testing.T) { input := SuperRootResponse{ diff --git a/op-service/eth/superroot_at_timestamp.go b/op-service/eth/superroot_at_timestamp.go new file mode 100644 index 0000000000000..e2e742478b7d2 --- /dev/null +++ b/op-service/eth/superroot_at_timestamp.go @@ -0,0 +1,38 @@ +package eth + +// OutputWithRequiredL1 is the full Output and its source L1 block +type OutputWithRequiredL1 struct { + Output *OutputResponse `json:"output"` + RequiredL1 BlockID `json:"required_l1"` +} + +type SuperRootResponseData struct { + + // VerifiedRequiredL1 is the minimum L1 block including the required data to fully verify all blocks at this timestamp + VerifiedRequiredL1 BlockID `json:"verified_required_l1"` + + // Super is the unhashed data for the superroot at the given timestamp after all verification is applied. + Super Super `json:"super"` + + // SuperRoot is the superroot at the given timestamp after all verification is applied. + SuperRoot Bytes32 `json:"super_root"` +} + +// AtTimestampResponse is the response superroot_atTimestamp +type SuperRootAtTimestampResponse struct { + // CurrentL1 is the highest L1 block that has been fully derived and verified by all chains. + CurrentL1 BlockID `json:"current_l1"` + + // OptimisticAtTimestamp is the L2 block that would be applied if verification were assumed to be successful, + // and the minimum L1 block required to derive them. 
If Data is nil, some chains may be absent from this map, + // indicating that there is no optimistic block for the chain at the requested timestamp that can be derived + // from the L1 data currently processed. + OptimisticAtTimestamp map[ChainID]OutputWithRequiredL1 `json:"optimistic_at_timestamp"` + + // ChainIDs are the chain IDs in the dependency set at the requested timestamp, sorted ascending. + ChainIDs []ChainID `json:"chain_ids"` + + // Data provides information about the super root at the requested timestamp if present. If block data at the + // requested timestamp is not present, the data will be nil. + Data *SuperRootResponseData `json:"data,omitempty"` +} diff --git a/op-service/sources/supernode_client.go b/op-service/sources/supernode_client.go new file mode 100644 index 0000000000000..c201145c42fba --- /dev/null +++ b/op-service/sources/supernode_client.go @@ -0,0 +1,28 @@ +package sources + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +type SuperNodeClient struct { + rpc client.RPC +} + +func NewSuperNodeClient(rpc client.RPC) *SuperNodeClient { + return &SuperNodeClient{ + rpc: rpc, + } +} + +func (c *SuperNodeClient) SuperRootAtTimestamp(ctx context.Context, timestamp uint64) (result eth.SuperRootAtTimestampResponse, err error) { + err = c.rpc.CallContext(ctx, &result, "superroot_atTimestamp", hexutil.Uint64(timestamp)) + return +} + +func (cl *SuperNodeClient) Close() { + cl.rpc.Close() +} diff --git a/op-service/sources/supernode_client_test.go b/op-service/sources/supernode_client_test.go new file mode 100644 index 0000000000000..951268cebd13f --- /dev/null +++ b/op-service/sources/supernode_client_test.go @@ -0,0 +1,148 @@ +package sources + +import ( + "context" + "errors" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum" + 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestSuperNodeClient_SuperRootAtTimestamp(t *testing.T) { + t.Run("Success", func(t *testing.T) { + ctx := context.Background() + rpc := new(mockRPC) + defer rpc.AssertExpectations(t) + client := NewSuperNodeClient(rpc) + + timestamp := uint64(245) + + chainA := eth.ChainIDFromUInt64(1) + chainB := eth.ChainIDFromUInt64(4) + expected := eth.SuperRootAtTimestampResponse{ + CurrentL1: eth.BlockID{ + Number: 305, + Hash: common.Hash{0xdd, 0xee, 0xff}, + }, + ChainIDs: []eth.ChainID{chainA, chainB}, + OptimisticAtTimestamp: map[eth.ChainID]eth.OutputWithRequiredL1{ + chainA: { + Output: ð.OutputResponse{ + Version: eth.Bytes32{0x01}, + OutputRoot: eth.Bytes32{0x11, 0x12}, + BlockRef: eth.L2BlockRef{ + Hash: common.Hash{0x22}, + Number: 472, + ParentHash: common.Hash{0xdd}, + Time: 9895839, + L1Origin: eth.BlockID{ + Hash: common.Hash{0xee}, + Number: 9802, + }, + SequenceNumber: 4982, + }, + WithdrawalStorageRoot: common.Hash{0xff}, + StateRoot: common.Hash{0xaa}, + }, + RequiredL1: eth.BlockID{ + Hash: common.Hash{0xbb}, + Number: 7842, + }, + }, + }, + Data: ð.SuperRootResponseData{ + VerifiedRequiredL1: eth.BlockID{ + Hash: common.Hash{0xcc}, + Number: 7411111, + }, + Super: eth.NewSuperV1(timestamp, eth.ChainIDAndOutput{ + ChainID: chainA, + Output: eth.Bytes32{0xa1}, + }, eth.ChainIDAndOutput{ + ChainID: chainB, + Output: eth.Bytes32{0xa2}, + }), + SuperRoot: eth.Bytes32{0xdd}, + }, + } + rpc.On("CallContext", ctx, new(eth.SuperRootAtTimestampResponse), + "superroot_atTimestamp", []any{hexutil.Uint64(timestamp)}).Run(func(args mock.Arguments) { + *args[1].(*eth.SuperRootAtTimestampResponse) = expected + }).Return([]error{nil}) + result, err := client.SuperRootAtTimestamp(ctx, timestamp) + require.NoError(t, err) + require.Equal(t, expected, result) + }) + + t.Run("NotFound", func(t 
*testing.T) { + ctx := context.Background() + rpc := new(mockRPC) + defer rpc.AssertExpectations(t) + client := NewSuperNodeClient(rpc) + + timestamp := uint64(245) + + chainA := eth.ChainIDFromUInt64(1) + chainB := eth.ChainIDFromUInt64(4) + expected := eth.SuperRootAtTimestampResponse{ + CurrentL1: eth.BlockID{ + Number: 305, + Hash: common.Hash{0xdd, 0xee, 0xff}, + }, + ChainIDs: []eth.ChainID{chainA, chainB}, + OptimisticAtTimestamp: map[eth.ChainID]eth.OutputWithRequiredL1{ + chainA: { + Output: ð.OutputResponse{ + Version: eth.Bytes32{0x01}, + OutputRoot: eth.Bytes32{0x11, 0x12}, + BlockRef: eth.L2BlockRef{ + Hash: common.Hash{0x22}, + Number: 472, + ParentHash: common.Hash{0xdd}, + Time: 9895839, + L1Origin: eth.BlockID{ + Hash: common.Hash{0xee}, + Number: 9802, + }, + SequenceNumber: 4982, + }, + WithdrawalStorageRoot: common.Hash{0xff}, + StateRoot: common.Hash{0xaa}, + }, + RequiredL1: eth.BlockID{ + Hash: common.Hash{0xbb}, + Number: 7842, + }, + }, + }, + Data: nil, // No super root found, so data is nil. 
+ } + rpc.On("CallContext", ctx, new(eth.SuperRootAtTimestampResponse), + "superroot_atTimestamp", []any{hexutil.Uint64(timestamp)}).Run(func(args mock.Arguments) { + *args[1].(*eth.SuperRootAtTimestampResponse) = expected + }).Return([]error{nil}) + result, err := client.SuperRootAtTimestamp(ctx, timestamp) + require.NoError(t, err) + require.Equal(t, expected, result) + }) + + t.Run("Error", func(t *testing.T) { + ctx := context.Background() + rpc := new(mockRPC) + defer rpc.AssertExpectations(t) + client := NewSuperNodeClient(rpc) + + timestamp := uint64(245) + + rpc.On("CallContext", ctx, new(eth.SuperRootAtTimestampResponse), + "superroot_atTimestamp", []any{hexutil.Uint64(timestamp)}).Return([]error{errors.New("blah blah blah: not found")}) + _, err := client.SuperRootAtTimestamp(ctx, timestamp) + require.NotErrorIs(t, err, ethereum.NotFound) // should not convert to not found even though it contains not found + require.NotNil(t, err) + }) +} diff --git a/op-service/txmgr/estimator.go b/op-service/txmgr/estimator.go index 0a3aa02694054..bef435c72385a 100644 --- a/op-service/txmgr/estimator.go +++ b/op-service/txmgr/estimator.go @@ -6,26 +6,27 @@ import ( "math/big" ) -type GasPriceEstimatorFn func(ctx context.Context, backend ETHBackend) (*big.Int, *big.Int, *big.Int, error) +type GasPriceEstimatorFn func(ctx context.Context, backend ETHBackend) (*big.Int, *big.Int, *big.Int, *big.Int, error) -func DefaultGasPriceEstimatorFn(ctx context.Context, backend ETHBackend) (*big.Int, *big.Int, *big.Int, error) { +func DefaultGasPriceEstimatorFn(ctx context.Context, backend ETHBackend) (*big.Int, *big.Int, *big.Int, *big.Int, error) { tip, err := backend.SuggestGasTipCap(ctx) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } head, err := backend.HeaderByNumber(ctx, nil) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } if head.BaseFee == nil { - return nil, nil, nil, errors.New("txmgr does not support 
pre-london blocks that do not have a base fee") + return nil, nil, nil, nil, errors.New("txmgr does not support pre-london blocks that do not have a base fee") } - blobFee, err := backend.BlobBaseFee(ctx) + blobBaseFee, err := backend.BlobBaseFee(ctx) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } - return tip, head.BaseFee, blobFee, nil + blobTipFee := big.NewInt(0) // using zero value for the default gas price estimator (if bgpo is not available) + return tip, head.BaseFee, blobTipFee, blobBaseFee, nil } diff --git a/op-service/txmgr/metrics/noop.go b/op-service/txmgr/metrics/noop.go index 47b1a52c54fc2..446180ebe035d 100644 --- a/op-service/txmgr/metrics/noop.go +++ b/op-service/txmgr/metrics/noop.go @@ -18,6 +18,7 @@ func (*NoopTxMetrics) TxPublished(string) {} func (*NoopTxMetrics) RecordBaseFee(*big.Int) {} func (*NoopTxMetrics) RecordBlobBaseFee(*big.Int) {} func (*NoopTxMetrics) RecordTipCap(*big.Int) {} +func (*NoopTxMetrics) RecordBlobTipCap(*big.Int) {} func (*NoopTxMetrics) RPCError() {} type FakeTxMetrics struct { diff --git a/op-service/txmgr/metrics/tx_metrics.go b/op-service/txmgr/metrics/tx_metrics.go index fe013a0b8d07f..eaf8d54a92066 100644 --- a/op-service/txmgr/metrics/tx_metrics.go +++ b/op-service/txmgr/metrics/tx_metrics.go @@ -20,6 +20,7 @@ type TxMetricer interface { RecordBaseFee(*big.Int) RecordBlobBaseFee(*big.Int) RecordTipCap(*big.Int) + RecordBlobTipCap(*big.Int) RPCError() } @@ -38,6 +39,7 @@ type TxMetrics struct { baseFee prometheus.Gauge blobBaseFee prometheus.Gauge tipCap prometheus.Gauge + blobTipCap prometheus.Gauge rpcError prometheus.Counter } @@ -131,6 +133,12 @@ func MakeTxMetrics(ns string, factory metrics.Factory) TxMetrics { Help: "Latest L1 suggested tip cap (in Wei)", Subsystem: "txmgr", }), + blobTipCap: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: ns, + Name: "blob_tipcap_wei", + Help: "Latest Blob suggested tip cap (in Wei)", + Subsystem: "txmgr", + }), rpcError: 
factory.NewCounter(prometheus.CounterOpts{ Namespace: ns, Name: "rpc_error_count", @@ -189,6 +197,10 @@ func (t *TxMetrics) RecordTipCap(tipcap *big.Int) { t.tipCap.Set(tcf) } +func (t *TxMetrics) RecordBlobTipCap(blobTipCap *big.Int) { + bcf, _ := blobTipCap.Float64() + t.blobTipCap.Set(bcf) +} func (t *TxMetrics) RPCError() { t.rpcError.Inc() } diff --git a/op-service/txmgr/mocks/TxManager.go b/op-service/txmgr/mocks/TxManager.go index a87291d8319e8..db3acedf19979 100644 --- a/op-service/txmgr/mocks/TxManager.go +++ b/op-service/txmgr/mocks/TxManager.go @@ -169,7 +169,7 @@ func (_m *TxManager) SendAsync(ctx context.Context, candidate txmgr.TxCandidate, } // SuggestGasPriceCaps provides a mock function with given fields: ctx -func (_m *TxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *big.Int, *big.Int, error) { +func (_m *TxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *big.Int, *big.Int, *big.Int, error) { ret := _m.Called(ctx) if len(ret) == 0 { @@ -179,8 +179,9 @@ func (_m *TxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *big.In var r0 *big.Int var r1 *big.Int var r2 *big.Int - var r3 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, *big.Int, *big.Int, error)); ok { + var r3 *big.Int + var r4 error + if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, *big.Int, *big.Int, *big.Int, error)); ok { return rf(ctx) } if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { @@ -207,13 +208,21 @@ func (_m *TxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *big.In } } - if rf, ok := ret.Get(3).(func(context.Context) error); ok { + if rf, ok := ret.Get(3).(func(context.Context) *big.Int); ok { r3 = rf(ctx) } else { - r3 = ret.Error(3) + if ret.Get(3) != nil { + r3 = ret.Get(3).(*big.Int) + } + } + + if rf, ok := ret.Get(4).(func(context.Context) error); ok { + r4 = rf(ctx) + } else { + r4 = ret.Error(4) } - return r0, r1, r2, r3 + return r0, r1, r2, r3, r4 } // NewTxManager creates a 
new instance of TxManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. diff --git a/op-service/txmgr/queue_test.go b/op-service/txmgr/queue_test.go index 81fd87aeaa6f3..5805d44a3401c 100644 --- a/op-service/txmgr/queue_test.go +++ b/op-service/txmgr/queue_test.go @@ -174,20 +174,16 @@ func TestQueue_Send(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() + backend := newMockBackendWithNonce(newGasPricer(3)) conf := configWithNumConfs(1) conf.ReceiptQueryInterval = 1 * time.Second // simulate a network send conf.RebroadcastInterval.Store(int64(2 * time.Second)) // possibly rebroadcast once before resubmission if unconfirmed conf.ResubmissionTimeout.Store(int64(3 * time.Second)) // resubmit to detect errors conf.SafeAbortNonceTooLowCount = 1 - backend := newMockBackendWithNonce(newGasPricer(3)) - mgr := &SimpleTxManager{ - chainID: conf.ChainID, - name: "TEST", - cfg: conf, - backend: backend, - l: testlog.Logger(t, log.LevelCrit), - metr: &metrics.NoopTxMetrics{}, - } + conf.Backend = backend + + mgr, err := NewSimpleTxManagerFromConfig("TEST", testlog.Logger(t, log.LevelCrit), &metrics.NoopTxMetrics{}, conf) + require.NoError(t, err) // track the nonces, and return any expected errors from tx sending var ( @@ -320,8 +316,6 @@ func TestQueue_Send_MaxPendingMetrics(t *testing.T) { metrics := metrics.FakeTxMetrics{} conf := configWithNumConfs(1) conf.Backend = backend - conf.NetworkTimeout = 1 * time.Second - conf.ChainID = big.NewInt(1) mgr, err := NewSimpleTxManagerFromConfig("TEST", testlog.Logger(t, log.LevelDebug), &metrics, conf) require.NoError(t, err) diff --git a/op-service/txmgr/rpc_test.go b/op-service/txmgr/rpc_test.go index 7b6f26bae91af..ca5c1ef0ac20f 100644 --- a/op-service/txmgr/rpc_test.go +++ b/op-service/txmgr/rpc_test.go @@ -11,14 +11,14 @@ import ( ) func TestTxmgrRPC(t *testing.T) { - minBaseFeeInit := big.NewInt(1000) - minPriorityFeeInit := big.NewInt(2000) + 
minBaseFeeInit := big.NewInt(2000) + minPriorityFeeInit := big.NewInt(1000) minBlobFeeInit := big.NewInt(3000) feeThresholdInit := big.NewInt(4000) rebroadcastIntervalInit := int64(25) bumpFeeRetryTimeInit := int64(100) - cfg := Config{} + cfg := configWithNumConfs(1) cfg.MinBaseFee.Store(minBaseFeeInit) cfg.MinTipCap.Store(minPriorityFeeInit) cfg.MinBlobTxFee.Store(minBlobFeeInit) @@ -26,7 +26,7 @@ func TestTxmgrRPC(t *testing.T) { cfg.RebroadcastInterval.Store(rebroadcastIntervalInit) cfg.ResubmissionTimeout.Store(bumpFeeRetryTimeInit) - h := newTestHarnessWithConfig(t, &cfg) + h := newTestHarnessWithConfig(t, cfg) appVersion := "test" server := oprpc.NewServer( diff --git a/op-service/txmgr/test_txmgr.go b/op-service/txmgr/test_txmgr.go index 9b885711d1d67..f81d30552be40 100644 --- a/op-service/txmgr/test_txmgr.go +++ b/op-service/txmgr/test_txmgr.go @@ -40,7 +40,7 @@ func (m *TestTxManager) WaitOnJammingTx(ctx context.Context) error { } func (m *TestTxManager) makeStuckTx(ctx context.Context, candidate TxCandidate) (*types.Transaction, error) { - gasTipCap, _, blobBaseFee, err := m.SuggestGasPriceCaps(ctx) + gasTipCap, _, gasBlobTipCap, blobBaseFee, err := m.SuggestGasPriceCaps(ctx) if err != nil { return nil, err } @@ -73,7 +73,7 @@ func (m *TestTxManager) makeStuckTx(ctx context.Context, candidate TxCandidate) Sidecar: sidecar, Nonce: nonce, } - if err := finishBlobTx(message, m.chainID, gasTipCap, gasFeeCap, blobFeeCap, candidate.Value); err != nil { + if err := finishBlobTx(message, m.chainID, gasBlobTipCap, gasFeeCap, blobFeeCap, candidate.Value); err != nil { return nil, err } txMessage = message diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index d6851540dcccf..60dd6a2e25653 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -95,7 +95,7 @@ type TxManager interface { // SuggestGasPriceCaps suggests what the new tip, base fee, and blob base fee should be based on // the current L1 conditions. 
`blobBaseFee` will be nil if 4844 is not yet active. - SuggestGasPriceCaps(ctx context.Context) (tipCap *big.Int, baseFee *big.Int, blobBaseFee *big.Int, err error) + SuggestGasPriceCaps(ctx context.Context) (tipCap *big.Int, baseFee *big.Int, blobTipCap *big.Int, blobBaseFee *big.Int, err error) } // ETHBackend is the set of methods that the transaction manager uses to resubmit gas & determine @@ -168,6 +168,10 @@ func NewSimpleTxManagerFromConfig(name string, l log.Logger, m metrics.TxMetrice return nil, fmt.Errorf("invalid config: %w", err) } + if conf.GasPriceEstimatorFn == nil { + conf.GasPriceEstimatorFn = DefaultGasPriceEstimatorFn + } + return &SimpleTxManager{ chainID: conf.ChainID, name: name, @@ -351,15 +355,13 @@ func (m *SimpleTxManager) prepare(ctx context.Context, candidate TxCandidate) (* // NOTE: Otherwise, the [SimpleTxManager] will query the specified backend for an estimate. func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (*types.Transaction, error) { m.l.Debug("crafting Transaction", "blobs", len(candidate.Blobs), "calldata_size", len(candidate.TxData)) - gasTipCap, baseFee, blobBaseFee, err := m.SuggestGasPriceCaps(ctx) + gasTipCap, baseFee, blobTipCap, blobBaseFee, err := m.SuggestGasPriceCaps(ctx) if err != nil { m.metr.RPCError() return nil, fmt.Errorf("failed to get gas price info or it's too high: %w", err) } gasFeeCap := calcGasFeeCap(baseFee, gasTipCap) - gasLimit := candidate.GasLimit - var sidecar *types.BlobTxSidecar var blobHashes []common.Hash if len(candidate.Blobs) > 0 { @@ -378,32 +380,9 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (* } } - // Calculate the intrinsic gas for the transaction - callMsg := ethereum.CallMsg{ - From: m.cfg.From, - To: candidate.To, - GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Data: candidate.TxData, - Value: candidate.Value, - } - if len(blobHashes) > 0 { - callMsg.BlobGasFeeCap = blobBaseFee - callMsg.BlobHashes = blobHashes - } 
- // If the gas limit is set, we can use that as the gas - if gasLimit == 0 { - gas, err := m.backend.EstimateGas(ctx, callMsg) - if err != nil { - return nil, fmt.Errorf("failed to estimate gas: %w", errutil.TryAddRevertReason(err)) - } - gasLimit = gas - } else { - callMsg.Gas = gasLimit - _, err := m.backend.CallContract(ctx, callMsg, nil) - if err != nil { - return nil, fmt.Errorf("failed to call: %w", errutil.TryAddRevertReason(err)) - } + candidate.GasLimit, err = m.estimateOrValidateCandidateTxGas(ctx, candidate, gasTipCap, gasFeeCap, blobHashes, blobBaseFee) + if err != nil { + return nil, err } var txMessage types.TxData @@ -415,11 +394,24 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (* message := &types.BlobTx{ To: *candidate.To, Data: candidate.TxData, - Gas: gasLimit, + Gas: candidate.GasLimit, BlobHashes: blobHashes, Sidecar: sidecar, } - if err := finishBlobTx(message, m.chainID, gasTipCap, gasFeeCap, blobFeeCap, candidate.Value); err != nil { + + // graceful upgrade to using blob tip oracle, for now we just compare the fees based on current codebase and the new bgpo module + { + oracleSavings := blobTipCap.Cmp(gasTipCap) < 0 + + // TODO(18618): before activating the blob tip oracle, confirm in prod that we mostly get oracleSavings == true, otherwise + // it is not worth it using the oracle + m.l.Info("Comparison between blobTipCap and gasTipCap", "blobTipCap", blobTipCap, "gasTipCap", gasTipCap, "oracle_blob_savings", oracleSavings) + + // TODO(18618): when activating the blob tip oracle, we should remove the assignment and use the suggested blob tip cap from the oracle + blobTipCap = gasTipCap + } + + if err := finishBlobTx(message, m.chainID, blobTipCap, gasFeeCap, blobFeeCap, candidate.Value); err != nil { return nil, fmt.Errorf("failed to create blob transaction: %w", err) } txMessage = message @@ -431,12 +423,46 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (* GasFeeCap: 
gasFeeCap, Value: candidate.Value, Data: candidate.TxData, - Gas: gasLimit, + Gas: candidate.GasLimit, } } return m.signWithNextNonce(ctx, txMessage) // signer sets the nonce field of the tx } +// estimateOrValidateCandidateTxGas either: +// a) validates and returns the candidate.GasLimit (if set) using CallContract +// b) estimates the gas limit using backend.EstimatGas and returns it. +func (m *SimpleTxManager) estimateOrValidateCandidateTxGas(ctx context.Context, candidate TxCandidate, gasTipCap, gasFeeCap *big.Int, blobHashes []common.Hash, blobBaseFee *big.Int) (uint64, error) { + // Calculate the intrinsic gas for the transaction + callMsg := ethereum.CallMsg{ + From: m.cfg.From, + To: candidate.To, + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Data: candidate.TxData, + Value: candidate.Value, + } + if len(blobHashes) > 0 { + callMsg.BlobGasFeeCap = blobBaseFee + callMsg.BlobHashes = blobHashes + } + // If the gas limit is set, we can use that as the gas + if candidate.GasLimit == 0 { + gas, err := m.backend.EstimateGas(ctx, callMsg) + if err != nil { + return 0, fmt.Errorf("failed to estimate gas: %w", errutil.TryAddRevertReason(err)) + } + return gas, nil + } + + callMsg.Gas = candidate.GasLimit + _, err := m.backend.CallContract(ctx, callMsg, nil) + if err != nil { + return 0, fmt.Errorf("failed to call: %w", errutil.TryAddRevertReason(err)) + } + return candidate.GasLimit, nil +} + func (m *SimpleTxManager) GetMinBaseFee() *big.Int { return m.cfg.MinBaseFee.Load() } @@ -888,7 +914,9 @@ func (m *SimpleTxManager) queryReceipt(ctx context.Context, txHash common.Hash, // multiple of the suggested values. 
func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transaction) (*types.Transaction, error) { m.txLogger(tx, true).Info("bumping gas price for transaction") - tip, baseFee, blobBaseFee, err := m.SuggestGasPriceCaps(ctx) + tip, baseFee, blobTipCap, blobBaseFee, err := m.SuggestGasPriceCaps(ctx) + // TODO(18618): when activating the blob tip oracle, integrate blobTipCap into the rest of the logic around bumping the gas price when replacing txs + _ = blobTipCap if err != nil { m.txLogger(tx, false).Warn("failed to get suggested gas tip and base fee", "err", err) return nil, err @@ -987,24 +1015,20 @@ func (m *SimpleTxManager) increaseGasPrice(ctx context.Context, tx *types.Transa // SuggestGasPriceCaps suggests what the new tip, base fee, and blob base fee should be based on // the current L1 conditions. `blobBaseFee` will be nil if 4844 is not yet active. // Note that an error will be returned if MaxTipCap or MaxBaseFee is exceeded. -func (m *SimpleTxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *big.Int, *big.Int, error) { +func (m *SimpleTxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *big.Int, *big.Int, *big.Int, error) { cCtx, cancel := context.WithTimeout(ctx, m.cfg.NetworkTimeout) defer cancel() - estimatorFn := m.gasPriceEstimatorFn - if estimatorFn == nil { - estimatorFn = DefaultGasPriceEstimatorFn - } - - tip, baseFee, blobFee, err := estimatorFn(cCtx, m.backend) + tip, baseFee, blobTipCap, blobBaseFee, err := m.gasPriceEstimatorFn(cCtx, m.backend) if err != nil { m.metr.RPCError() - return nil, nil, nil, fmt.Errorf("failed to get gas price estimates: %w", err) + return nil, nil, nil, nil, fmt.Errorf("failed to get gas price estimates: %w", err) } m.metr.RecordTipCap(tip) m.metr.RecordBaseFee(baseFee) - m.metr.RecordBlobBaseFee(blobFee) + m.metr.RecordBlobBaseFee(blobBaseFee) + m.metr.RecordBlobTipCap(blobTipCap) // Enforce minimum base fee and tip cap minTipCap := m.cfg.MinTipCap.Load() @@ -1012,12 
+1036,21 @@ func (m *SimpleTxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *b minBaseFee := m.cfg.MinBaseFee.Load() maxBaseFee := m.cfg.MaxBaseFee.Load() + // Enforce minimum tip cap (for non-blob txs) if minTipCap != nil && tip.Cmp(minTipCap) == -1 { m.l.Debug("Enforcing min tip cap", "minTipCap", minTipCap, "origTipCap", tip) tip = new(big.Int).Set(minTipCap) } if maxTipCap != nil && tip.Cmp(maxTipCap) > 0 { - return nil, nil, nil, fmt.Errorf("tip is too high: %v, cap:%v", tip, maxTipCap) + return nil, nil, nil, nil, fmt.Errorf("tip is too high: %v, cap:%v", tip, maxTipCap) + } + + // Comparing if the configured min tip cap is higher than the suggested blob tip cap, and if so, it means we are overpaying for the transaction + if minTipCap != nil && blobTipCap.Cmp(minTipCap) == -1 { + m.l.Warn("Suggested blobTipCap is lower than the configured min tip cap for blob txs", "minTipCap", minTipCap, "blobTipCap", blobTipCap) + } + if maxTipCap != nil && blobTipCap.Cmp(maxTipCap) > 0 { + return nil, nil, nil, nil, fmt.Errorf("blob tip cap is too high: %v, cap:%v", blobTipCap, maxTipCap) } if minBaseFee != nil && baseFee.Cmp(minBaseFee) == -1 { @@ -1025,10 +1058,11 @@ func (m *SimpleTxManager) SuggestGasPriceCaps(ctx context.Context) (*big.Int, *b baseFee = new(big.Int).Set(minBaseFee) } if maxBaseFee != nil && baseFee.Cmp(maxBaseFee) > 0 { - return nil, nil, nil, fmt.Errorf("baseFee is too high: %v, cap:%v", baseFee, maxBaseFee) + return nil, nil, nil, nil, fmt.Errorf("baseFee is too high: %v, cap:%v", baseFee, maxBaseFee) } - return tip, baseFee, blobFee, nil + m.l.Info("Suggested gas price caps", "gasTipCap", tip, "baseFee", baseFee, "blobTipCap", blobTipCap, "blobBaseFee", blobBaseFee) + return tip, baseFee, blobTipCap, blobBaseFee, nil } // checkLimits checks that the tip and baseFee have not increased by more than the configured multipliers diff --git a/op-service/txmgr/txmgr_test.go b/op-service/txmgr/txmgr_test.go index 652576f401b91..2e6d18ebd6b1f 
100644 --- a/op-service/txmgr/txmgr_test.go +++ b/op-service/txmgr/txmgr_test.go @@ -57,15 +57,8 @@ func newTestHarnessWithConfig(t *testing.T, cfg *Config) *testHarness { g := newGasPricer(3) backend := newMockBackend(g) cfg.Backend = backend - mgr := &SimpleTxManager{ - chainID: cfg.ChainID, - name: "TEST", - cfg: cfg, - backend: cfg.Backend, - l: testlog.Logger(t, log.LevelCrit), - metr: &metrics.NoopTxMetrics{}, - } - + mgr, err := NewSimpleTxManagerFromConfig("TEST", testlog.Logger(t, log.LevelCrit), &metrics.NoopTxMetrics{}, cfg) + require.NoError(t, err) return &testHarness{ cfg: cfg, mgr: mgr, @@ -107,6 +100,7 @@ func (h testHarness) createBlobTxCandidate() TxCandidate { func configWithNumConfs(numConfirmations uint64) *Config { cfg := Config{ + ChainID: big.NewInt(1), ReceiptQueryInterval: 50 * time.Millisecond, NumConfirmations: numConfirmations, SafeAbortNonceTooLowCount: 3, @@ -114,10 +108,11 @@ func configWithNumConfs(numConfirmations uint64) *Config { Signer: func(ctx context.Context, from common.Address, tx *types.Transaction) (*types.Transaction, error) { return tx, nil }, - From: common.Address{}, - RetryInterval: 1 * time.Millisecond, - MaxRetries: 5, - CellProofTime: math.MaxUint64, + From: common.Address{}, + RetryInterval: 1 * time.Millisecond, + NetworkTimeout: 1 * time.Second, + MaxRetries: 5, + CellProofTime: math.MaxUint64, } cfg.RebroadcastInterval.Store(int64(time.Second / 2)) @@ -428,7 +423,6 @@ func TestTxMgrTxSendTimeout(t *testing.T) { testSendVariants(t, func(t *testing.T, send testSendVariantsFn) { conf := configWithNumConfs(1) conf.TxSendTimeout = 3 * time.Second - conf.NetworkTimeout = 1 * time.Second h := newTestHarnessWithConfig(t, conf) @@ -1262,8 +1256,8 @@ func TestIncreaseGasPrice(t *testing.T) { { name: "supports extension through custom estimator", run: func(t *testing.T) { - estimator := func(ctx context.Context, backend ETHBackend) (*big.Int, *big.Int, *big.Int, error) { - return big.NewInt(100), big.NewInt(3000), 
big.NewInt(100), nil + estimator := func(ctx context.Context, backend ETHBackend) (*big.Int, *big.Int, *big.Int, *big.Int, error) { + return big.NewInt(100), big.NewInt(3000), big.NewInt(100), big.NewInt(100), nil } _, newTx, err := doGasPriceIncrease(t, 70, 2000, 80, 2100, estimator) require.NoError(t, err) @@ -1319,6 +1313,9 @@ func testIncreaseGasPriceLimit(t *testing.T, lt gasPriceLimitTest) { } cfg := Config{ + ChainID: big.NewInt(1), + NetworkTimeout: 1 * time.Second, + TxNotInMempoolTimeout: 1 * time.Second, ReceiptQueryInterval: 50 * time.Millisecond, NumConfirmations: 1, SafeAbortNonceTooLowCount: 3, @@ -1331,14 +1328,11 @@ func testIncreaseGasPriceLimit(t *testing.T, lt gasPriceLimitTest) { cfg.FeeLimitMultiplier.Store(5) cfg.FeeLimitThreshold.Store(lt.thr) cfg.MinBlobTxFee.Store(defaultMinBlobTxFee) + cfg.Backend = &borkedBackend + + mgr, err := NewSimpleTxManagerFromConfig("TEST", testlog.Logger(t, log.LevelCrit), &metrics.NoopTxMetrics{}, &cfg) + require.NoError(t, err) - mgr := &SimpleTxManager{ - cfg: &cfg, - name: "TEST", - backend: &borkedBackend, - l: testlog.Logger(t, log.LevelCrit), - metr: &metrics.NoopTxMetrics{}, - } lastGoodTx := types.NewTx(&types.DynamicFeeTx{ GasTipCap: big.NewInt(10), GasFeeCap: big.NewInt(100), @@ -1347,7 +1341,6 @@ func testIncreaseGasPriceLimit(t *testing.T, lt gasPriceLimitTest) { // Run increaseGasPrice a bunch of times in a row to simulate a very fast resubmit loop to make // sure it errors out without a runaway fee increase. 
ctx := context.Background() - var err error for { var tmpTx *types.Transaction tmpTx, err = mgr.increaseGasPrice(ctx, lastGoodTx) @@ -1493,7 +1486,7 @@ func TestMinFees(t *testing.T) { conf.MinTipCap.Store(tt.minTipCap) h := newTestHarnessWithConfig(t, conf) - tip, baseFee, _, err := h.mgr.SuggestGasPriceCaps(context.Background()) + tip, baseFee, _, _, err := h.mgr.SuggestGasPriceCaps(context.Background()) require.NoError(err) if tt.expectMinBaseFee { @@ -1540,7 +1533,7 @@ func TestMaxFees(t *testing.T) { conf.MaxTipCap.Store(tt.maxTipCap) h := newTestHarnessWithConfig(t, conf) - tip, baseFee, _, err := h.mgr.SuggestGasPriceCaps(context.Background()) + tip, baseFee, _, _, err := h.mgr.SuggestGasPriceCaps(context.Background()) if tt.expectMaxBaseFee { require.Equal(err, fmt.Errorf("baseFee is too high: %v, cap:%v", h.gasPricer.baseBaseFee, tt.maxBaseFee), "expect baseFee is too high") } diff --git a/op-supernode/flags/flags.go b/op-supernode/flags/flags.go index bf3e6580d5f71..b0b9631b0960f 100644 --- a/op-supernode/flags/flags.go +++ b/op-supernode/flags/flags.go @@ -96,12 +96,12 @@ func FullDynamicFlags(chains []uint64) []cli.Flag { for _, f := range opnodeflags.Flags { baseName := f.Names()[0] // vn.all.* env var/alias prefixing - allEnvs := prefixEnvVar(f, "VN_ALL_") + allEnvs := upgradeEnvVarPrefixes(f, opnodeflags.EnvVarPrefix, "VN_ALL") allAliases := prefixAliases(f, VNFlagGlobalPrefix) final = append(final, renameFlagWithEnv(f, VNFlagGlobalPrefix+baseName, allEnvs, allAliases)) // per-chain for _, id := range chains { - perChainEnvs := prefixEnvVar(f, fmt.Sprintf("VN_%d_", id)) + perChainEnvs := upgradeEnvVarPrefixes(f, opnodeflags.EnvVarPrefix, fmt.Sprintf("VN_%d", id)) perAliases := prefixAliases(f, fmt.Sprintf("%s%d.", VNFlagNamePrefix, id)) final = append(final, renameFlagWithEnv(f, fmt.Sprintf("%s%d.%s", VNFlagNamePrefix, id, baseName), perChainEnvs, perAliases)) } diff --git a/op-supernode/flags/virtual_flags.go b/op-supernode/flags/virtual_flags.go 
index 4c5ff50caf100..5a152ad9bf5af 100644 --- a/op-supernode/flags/virtual_flags.go +++ b/op-supernode/flags/virtual_flags.go @@ -137,21 +137,23 @@ func renameFlagWithEnv(f cli.Flag, name string, envs []string, aliases []string) } } -// prefixEnvVar prefixes the env vars of the given flag with the given middle string -// e.g. "VN_ALL_" or "VN_123_". -func prefixEnvVar(f cli.Flag, mid string) []string { +// upgradeEnvVarPrefixes returns a slice of the env vars of the given flag +// each with a modified prefix, formed by the OP_SUPERNODE prefix +// followed by given infix such as "VN_ALL" +// e.g. "OP_NODE_FINALITY_DELAY" becomes "OP_SUPERNODE_VN_ALL_FINALITY_DELAY" +// or "OP_SUPERNODE_VN_123_FINALITY_DELAY". +func upgradeEnvVarPrefixes(f cli.Flag, existingPrefix, newInfix string) []string { envs := f.(interface{ GetEnvVars() []string }).GetEnvVars() if len(envs) == 0 { return nil } out := make([]string, 0, len(envs)) for _, e := range envs { - idx := strings.Index(e, "_") - if idx < 0 { - continue + suffix := strings.TrimPrefix(e, existingPrefix+"_") + if suffix == e { + panic("encountered unprefixed flag") } - suffix := e[idx+1:] - out = append(out, EnvVarPrefix+"_"+mid+suffix) + out = append(out, EnvVarPrefix+"_"+newInfix+"_"+suffix) } return out } diff --git a/op-supernode/flags/virtual_test.go b/op-supernode/flags/virtual_test.go index fb7058ae75175..a033e78bcbe31 100644 --- a/op-supernode/flags/virtual_test.go +++ b/op-supernode/flags/virtual_test.go @@ -6,6 +6,7 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/require" "github.com/urfave/cli/v2" opnodeflags "github.com/ethereum-optimism/optimism/op-node/flags" @@ -84,6 +85,19 @@ func TestParseChainsVariants(t *testing.T) { } } +func TestUpgradeEnvVarPrefixes(t *testing.T) { + flag := &cli.StringFlag{Name: "flag", EnvVars: []string{"OP_NODE_FINALITY_DELAY"}} + got := upgradeEnvVarPrefixes(flag, "OP_NODE", "VN_987") + expected := []string{"OP_SUPERNODE_VN_987_FINALITY_DELAY"} + require.Equal(t, 
expected, got) + got = upgradeEnvVarPrefixes(flag, "OP_NODE", "VN_ALL") + expected = []string{"OP_SUPERNODE_VN_ALL_FINALITY_DELAY"} + require.Equal(t, expected, got) + + badFlag := &cli.StringFlag{Name: "flag", EnvVars: []string{"BAD_FLAG_FINALITY_DELAY"}} + require.Panics(t, func() { upgradeEnvVarPrefixes(badFlag, "OP_NODE", "VN_987") }) +} + func TestFullDynamicFlags_ClonesAllFlagsForChainsAndGlobal(t *testing.T) { chains := []uint64{100, 200} flagsOut := FullDynamicFlags(chains) diff --git a/op-supernode/supernode/activity/superroot/superroot.go b/op-supernode/supernode/activity/superroot/superroot.go index 31d4a9c29fe0b..e4ae516a4f234 100644 --- a/op-supernode/supernode/activity/superroot/superroot.go +++ b/op-supernode/supernode/activity/superroot/superroot.go @@ -2,7 +2,9 @@ package superroot import ( "context" + "errors" "fmt" + "slices" "github.com/ethereum-optimism/optimism/op-service/eth" cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" @@ -32,77 +34,45 @@ func (s *Superroot) RPCService() interface{} { return &superrootAPI{s: s} } type superrootAPI struct{ s *Superroot } -// OutputWithSource is the full Output and its source L1 block -type OutputWithSource struct { - Output *eth.OutputResponse - SourceL1 eth.BlockID -} - -// L2WithRequiredL1 is a verified L2 block and the minimum L1 block at which the verification is possible -type L2WithRequiredL1 struct { - L2 eth.BlockID - MinRequiredL1 eth.BlockID -} - -// atTimestampResponse is the response superroot_atTimestamp -// it contains: -// - CurrentL1Derived: the current L1 block that each chain has derived up to (without any verification) -// - CurrentL1Verified: the current L1 block that each verifier has processed up to -// - VerifiedAtTimestamp: the L2 blocks which are fully verified at the given timestamp, and the minimum L1 block at which verification is possible -// - OptimisticAtTimestamp: the L2 blocks which would be applied if verification were assumed to be 
successful, and their L1 sources -// - SuperRoot: the superroot at the given timestamp using verified L2 blocks -type atTimestampResponse struct { - CurrentL1Derived map[eth.ChainID]eth.BlockID - CurrentL1Verified map[string]eth.BlockID - VerifiedAtTimestamp map[eth.ChainID]L2WithRequiredL1 - OptimisticAtTimestamp map[eth.ChainID]OutputWithSource - MinCurrentL1 eth.BlockID - MinVerifiedRequiredL1 eth.BlockID - SuperRoot eth.Bytes32 -} - // AtTimestamp computes the super-root at the given timestamp, plus additional information about the current L1s, verified L2s, and optimistic L2s -func (api *superrootAPI) AtTimestamp(ctx context.Context, timestamp uint64) (atTimestampResponse, error) { +func (api *superrootAPI) AtTimestamp(ctx context.Context, timestamp uint64) (eth.SuperRootAtTimestampResponse, error) { return api.s.atTimestamp(ctx, timestamp) } -func (s *Superroot) atTimestamp(ctx context.Context, timestamp uint64) (atTimestampResponse, error) { - currentL1Derived := map[eth.ChainID]eth.BlockID{} - // there are no Verification Activities yet, so there is no call to make to collect their CurrentL1 - // this will be replaced with a call to the Verification Activities when they are implemented - currentL1Verified := map[string]eth.BlockID{} - verified := map[eth.ChainID]L2WithRequiredL1{} - optimistic := map[eth.ChainID]OutputWithSource{} +func (s *Superroot) atTimestamp(ctx context.Context, timestamp uint64) (eth.SuperRootAtTimestampResponse, error) { + optimistic := map[eth.ChainID]eth.OutputWithRequiredL1{} minCurrentL1 := eth.BlockID{} minVerifiedRequiredL1 := eth.BlockID{} chainOutputs := make([]eth.ChainIDAndOutput, 0, len(s.chains)) - // get current l1s + // Get current l1s // this informs callers that the chains local views have considered at least up to this L1 block - // but does not guarantee verifiers have processed this L1 block yet. 
This field is likely unhelpful, but I await feedback to confirm + // TODO(#18651): Currently there are no verifiers to consider, but once there are, this needs to be updated to consider if + // they have also processed the L1 data. for chainID, chain := range s.chains { currentL1, err := chain.CurrentL1(ctx) if err != nil { s.log.Warn("failed to get current L1", "chain_id", chainID.String(), "err", err) - return atTimestampResponse{}, err + return eth.SuperRootAtTimestampResponse{}, err } - currentL1Derived[chainID] = currentL1.ID() if currentL1.ID().Number < minCurrentL1.Number || minCurrentL1 == (eth.BlockID{}) { minCurrentL1 = currentL1.ID() } } + notFound := false + chainIDs := make([]eth.ChainID, 0, len(s.chains)) // collect verified and optimistic L2 and L1 blocks at the given timestamp for chainID, chain := range s.chains { + chainIDs = append(chainIDs, chainID) // verifiedAt returns the L2 block which is fully verified at the given timestamp, and the minimum L1 block at which verification is possible verifiedL2, verifiedL1, err := chain.VerifiedAt(ctx, timestamp) - if err != nil { - s.log.Warn("failed to get verified L1", "chain_id", chainID.String(), "err", err) - return atTimestampResponse{}, fmt.Errorf("%w: %w", ethereum.NotFound, err) - } - verified[chainID] = L2WithRequiredL1{ - L2: verifiedL2, - MinRequiredL1: verifiedL1, + if errors.Is(err, ethereum.NotFound) { + notFound = true + continue // To allow other chains to populate unverified blocks + } else if err != nil { + s.log.Warn("failed to get verified block", "chain_id", chainID.String(), "err", err) + return eth.SuperRootAtTimestampResponse{}, fmt.Errorf("failed to get verified block: %w", err) } if verifiedL1.Number < minVerifiedRequiredL1.Number || minVerifiedRequiredL1 == (eth.BlockID{}) { minVerifiedRequiredL1 = verifiedL1 @@ -111,38 +81,44 @@ func (s *Superroot) atTimestamp(ctx context.Context, timestamp uint64) (atTimest outRoot, err := chain.OutputRootAtL2BlockNumber(ctx, 
verifiedL2.Number) if err != nil { s.log.Warn("failed to compute output root at L2 block", "chain_id", chainID.String(), "l2_number", verifiedL2.Number, "err", err) - return atTimestampResponse{}, fmt.Errorf("%w: %w", ethereum.NotFound, err) + return eth.SuperRootAtTimestampResponse{}, fmt.Errorf("failed to compute output root at L2 block %d for chain ID %v: %w", verifiedL2.Number, chainID, err) } chainOutputs = append(chainOutputs, eth.ChainIDAndOutput{ChainID: chainID, Output: outRoot}) // Optimistic output is the full output at the optimistic L2 block for the timestamp optimisticOut, err := chain.OptimisticOutputAtTimestamp(ctx, timestamp) if err != nil { - s.log.Warn("failed to get optimistic L1", "chain_id", chainID.String(), "err", err) - return atTimestampResponse{}, fmt.Errorf("%w: %w", ethereum.NotFound, err) + s.log.Warn("failed to get optimistic block", "chain_id", chainID.String(), "err", err) + return eth.SuperRootAtTimestampResponse{}, fmt.Errorf("failed to get optimistic block at timestamp %v for chain ID %v: %w", timestamp, chainID, err) } // Also include the source L1 for context _, optimisticL1, err := chain.OptimisticAt(ctx, timestamp) if err != nil { s.log.Warn("failed to get optimistic source L1", "chain_id", chainID.String(), "err", err) - return atTimestampResponse{}, fmt.Errorf("%w: %w", ethereum.NotFound, err) + return eth.SuperRootAtTimestampResponse{}, fmt.Errorf("failed to get optimistic source L1 at timestamp %v for chain ID %v: %w", timestamp, chainID, err) } - optimistic[chainID] = OutputWithSource{ - Output: optimisticOut, - SourceL1: optimisticL1, + optimistic[chainID] = eth.OutputWithRequiredL1{ + Output: optimisticOut, + RequiredL1: optimisticL1, } } - // Build super root from collected outputs - superV1 := eth.NewSuperV1(timestamp, chainOutputs...) 
- superRoot := eth.SuperRoot(superV1) - - return atTimestampResponse{ - CurrentL1Derived: currentL1Derived, - CurrentL1Verified: currentL1Verified, - VerifiedAtTimestamp: verified, + slices.SortFunc(chainIDs, func(a, b eth.ChainID) int { + return a.Cmp(b) + }) + response := eth.SuperRootAtTimestampResponse{ + CurrentL1: minCurrentL1, OptimisticAtTimestamp: optimistic, - MinCurrentL1: minCurrentL1, - MinVerifiedRequiredL1: minVerifiedRequiredL1, - SuperRoot: superRoot, - }, nil + ChainIDs: chainIDs, + } + if !notFound { + // Build super root from collected outputs + superV1 := eth.NewSuperV1(timestamp, chainOutputs...) + superRoot := eth.SuperRoot(superV1) + response.Data = ð.SuperRootResponseData{ + VerifiedRequiredL1: minVerifiedRequiredL1, + Super: superV1, + SuperRoot: superRoot, + } + } + return response, nil } diff --git a/op-supernode/supernode/activity/superroot/superroot_test.go b/op-supernode/supernode/activity/superroot/superroot_test.go index f85acd8572ee2..c5d4d50003efd 100644 --- a/op-supernode/supernode/activity/superroot/superroot_test.go +++ b/op-supernode/supernode/activity/superroot/superroot_test.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" + "github.com/ethereum/go-ethereum" gethlog "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" ) @@ -97,14 +98,12 @@ func TestSuperroot_AtTimestamp_Succeeds(t *testing.T) { api := &superrootAPI{s: s} out, err := api.AtTimestamp(context.Background(), 123) require.NoError(t, err) - require.Len(t, out.CurrentL1Derived, 2) - require.Len(t, out.VerifiedAtTimestamp, 2) require.Len(t, out.OptimisticAtTimestamp, 2) // min values - require.Equal(t, uint64(2000), out.MinCurrentL1.Number) - require.Equal(t, uint64(1000), out.MinVerifiedRequiredL1.Number) + require.Equal(t, uint64(2000), out.CurrentL1.Number) + require.Equal(t, uint64(1000), out.Data.VerifiedRequiredL1.Number) // 
With zero outputs, the superroot will be deterministic, just ensure it's set - _ = out.SuperRoot + _ = out.Data.SuperRoot } func TestSuperroot_AtTimestamp_ComputesSuperRoot(t *testing.T) { @@ -141,7 +140,7 @@ func TestSuperroot_AtTimestamp_ComputesSuperRoot(t *testing.T) { {ChainID: eth.ChainIDFromUInt64(420), Output: out2}, } expected := eth.SuperRoot(eth.NewSuperV1(ts, chainOutputs...)) - require.Equal(t, expected, resp.SuperRoot) + require.Equal(t, expected, resp.Data.SuperRoot) } func TestSuperroot_AtTimestamp_ErrorOnCurrentL1(t *testing.T) { @@ -170,6 +169,30 @@ func TestSuperroot_AtTimestamp_ErrorOnVerifiedAt(t *testing.T) { require.Error(t, err) } +func TestSuperroot_AtTimestamp_NotFoundOnVerifiedAt(t *testing.T) { + t.Parallel() + chains := map[eth.ChainID]cc.ChainContainer{ + eth.ChainIDFromUInt64(10): &mockCC{ + verifiedErr: fmt.Errorf("nope: %w", ethereum.NotFound), + }, + eth.ChainIDFromUInt64(11): &mockCC{ + verL2: eth.BlockID{Number: 200}, + verL1: eth.BlockID{Number: 1100}, + optL2: eth.BlockID{Number: 200}, + optL1: eth.BlockID{Number: 1100}, + output: eth.Bytes32{0x12}, + currentL1: eth.BlockRef{Number: 2100}, + }, + } + s := New(gethlog.New(), chains) + api := &superrootAPI{s: s} + actual, err := api.AtTimestamp(context.Background(), 123) + require.NoError(t, err) + require.Nil(t, actual.Data) + require.NotContains(t, actual.OptimisticAtTimestamp, eth.ChainIDFromUInt64(10)) + require.Contains(t, actual.OptimisticAtTimestamp, eth.ChainIDFromUInt64(11)) +} + func TestSuperroot_AtTimestamp_ErrorOnOutputRoot(t *testing.T) { t.Parallel() chains := map[eth.ChainID]cc.ChainContainer{ @@ -206,8 +229,6 @@ func TestSuperroot_AtTimestamp_EmptyChains(t *testing.T) { api := &superrootAPI{s: s} out, err := api.AtTimestamp(context.Background(), 123) require.NoError(t, err) - require.Len(t, out.CurrentL1Derived, 0) - require.Len(t, out.VerifiedAtTimestamp, 0) require.Len(t, out.OptimisticAtTimestamp, 0) } diff --git 
a/op-supernode/supernode/chain_container/chain_container.go b/op-supernode/supernode/chain_container/chain_container.go index 0e262ee94db1c..f3122b0be240f 100644 --- a/op-supernode/supernode/chain_container/chain_container.go +++ b/op-supernode/supernode/chain_container/chain_container.go @@ -276,6 +276,7 @@ func (c *simpleChainContainer) CurrentL1(ctx context.Context) (eth.BlockRef, err } // VerifiedAt returns the verified L2 and L1 blocks for the given L2 timestamp. +// Must return ethereum.NotFound if there is no safe block at the specified timestamp. func (c *simpleChainContainer) VerifiedAt(ctx context.Context, ts uint64) (l2, l1 eth.BlockID, err error) { l2Block, err := c.SafeBlockAtTimestamp(ctx, ts) if err != nil { diff --git a/op-supernode/supernode/chain_container/engine_controller/engine_controller.go b/op-supernode/supernode/chain_container/engine_controller/engine_controller.go index 1fbe41706b2d8..1da93c3a93d83 100644 --- a/op-supernode/supernode/chain_container/engine_controller/engine_controller.go +++ b/op-supernode/supernode/chain_container/engine_controller/engine_controller.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum/go-ethereum" gethlog "github.com/ethereum/go-ethereum/log" ) @@ -16,6 +17,7 @@ import ( type EngineController interface { // SafeBlockAtTimestamp returns the L2 block ref for the block at or before the given timestamp, // clamped to the current SAFE head. + // Must return ethereum.NotFound if there is no safe block at the specified timestamp. SafeBlockAtTimestamp(ctx context.Context, ts uint64) (eth.L2BlockRef, error) // OutputV0AtBlockNumber returns the output preimage for the given L2 block number. 
OutputV0AtBlockNumber(ctx context.Context, num uint64) (*eth.OutputV0, error) @@ -60,9 +62,10 @@ func NewEngineControllerFromConfig(ctx context.Context, log gethlog.Logger, vncf var ( ErrNoEngineClient = errors.New("engine client not initialized") ErrNoRollupConfig = errors.New("rollup config not available") - ErrNotFound = errors.New("not found") ) +// SafeBlockAtTimestamp returns the L2 block ref for the block at or before the given timestamp, +// clamped to the current SAFE head. Must return ethereum.NotFound if no safe block is available at the timestamp. func (e *simpleEngineController) SafeBlockAtTimestamp(ctx context.Context, ts uint64) (eth.L2BlockRef, error) { if e.l2 == nil { return eth.L2BlockRef{}, ErrNoEngineClient @@ -81,7 +84,7 @@ func (e *simpleEngineController) SafeBlockAtTimestamp(ctx context.Context, ts ui } if num > safeHead.Number { e.log.Warn("engine_controller: target block number exceeds safe head", "targetBlockNumber", num, "safeHead", safeHead.Number) - return eth.L2BlockRef{}, ErrNotFound + return eth.L2BlockRef{}, ethereum.NotFound } e.log.Debug("engine_controller: computed safe block number from timestamp", "timestamp", ts, "targetBlockNumber", num, "safeHead", safeHead.Number, "safeHeadErr", err) diff --git a/op-supernode/supernode/chain_container/engine_controller/engine_controller_test.go b/op-supernode/supernode/chain_container/engine_controller/engine_controller_test.go index 6545b15f7ef20..013544f985d49 100644 --- a/op-supernode/supernode/chain_container/engine_controller/engine_controller_test.go +++ b/op-supernode/supernode/chain_container/engine_controller/engine_controller_test.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" gethlog "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" @@ -94,7 +95,7 @@ func 
TestEngineController_TargetBlockNumber(t *testing.T) { require.Equal(t, m.ref, numRef) // ts = genesis + 2*1000 => block #1000, with safe head now below target _, err = ec.SafeBlockAtTimestamp(context.Background(), 1_000+2*1000) - require.ErrorIs(t, err, ErrNotFound) + require.ErrorIs(t, err, ethereum.NotFound) } func TestEngineController_SentinelErrors(t *testing.T) { diff --git a/op-sync-tester/synctester/backend/sync_tester.go b/op-sync-tester/synctester/backend/sync_tester.go index 45633843715fb..51d7492662166 100644 --- a/op-sync-tester/synctester/backend/sync_tester.go +++ b/op-sync-tester/synctester/backend/sync_tester.go @@ -220,6 +220,27 @@ func (s *SyncTester) ChainId(ctx context.Context) (hexutil.Big, error) { }) } +func (s *SyncTester) ExchangeCapabilities(ctx context.Context, _ []string) []string { + return []string{ + // getPayload + "engine_getPayloadV1", + "engine_getPayloadV2", + "engine_getPayloadV3", + "engine_getPayloadV4", + + // forkchoiceUpdated + "engine_forkchoiceUpdatedV1", + "engine_forkchoiceUpdatedV2", + "engine_forkchoiceUpdatedV3", + + // newPayload + "engine_newPayloadV1", + "engine_newPayloadV2", + "engine_newPayloadV3", + "engine_newPayloadV4", + } +} + // GetPayloadV1 only supports V1 payloads. 
func (s *SyncTester) GetPayloadV1(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) { return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.ExecutionPayloadEnvelope, error) { diff --git a/op-sync-tester/synctester/frontend/engine.go b/op-sync-tester/synctester/frontend/engine.go index eefdcd4a6b6be..3124068971b48 100644 --- a/op-sync-tester/synctester/frontend/engine.go +++ b/op-sync-tester/synctester/frontend/engine.go @@ -64,3 +64,7 @@ func (e *EngineFrontend) NewPayloadV3(ctx context.Context, payload *eth.Executio func (e *EngineFrontend) NewPayloadV4(ctx context.Context, payload *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (*eth.PayloadStatusV1, error) { return e.b.NewPayloadV4(ctx, payload, versionedHashes, beaconRoot, executionRequests) } + +func (e *EngineFrontend) ExchangeCapabilities(ctx context.Context, args []string) []string { + return e.b.ExchangeCapabilities(ctx, args) +} diff --git a/ops/ai-eng/contracts-test-maintenance/no-need-changes.toml b/ops/ai-eng/contracts-test-maintenance/no-need-changes.toml index 9b1330b9c16b5..5f15b91d92c76 100644 --- a/ops/ai-eng/contracts-test-maintenance/no-need-changes.toml +++ b/ops/ai-eng/contracts-test-maintenance/no-need-changes.toml @@ -30,3 +30,21 @@ recorded_at = "2025-12-12T17:32:49Z" devin_session_id = "50e7ca21d9264fe6a62c7ec944da7e43" run_id = "20251212_173200" reason = "Test coverage is already comprehensive with all functions and code paths tested. The constructor has three fuzz tests covering EOA, contract, and zero address recipients with thorough assertions." 
+ +[[tests]] +test_path = "test/periphery/drippie/dripchecks/CheckTrue.t.sol" +contract_path = "src/periphery/drippie/dripchecks/CheckTrue.sol" +contract_hash = "2b589dfd0bfe371f99dcab24f21ff9dd60938561" +recorded_at = "2025-12-18T07:07:01Z" +devin_session_id = "1e3dcc3f9dae42a79ea72b0b30b2187f" +run_id = "20251218_070700" +reason = "Test coverage is already comprehensive. The check function has a fuzz test covering all possible inputs, and the name getter is tested. The contract has no branches, error conditions, or edge cases to add." + +[[tests]] +test_path = "test/universal/StandardBridge.t.sol" +contract_path = "src/universal/StandardBridge.sol" +contract_hash = "8d0dd96e494b2ba154587877351e87788336a4ec" +recorded_at = "2025-12-22T07:08:26Z" +devin_session_id = "f27067d230bf4a9fa4b4a96f59401a1f" +run_id = "20251222_070800" +reason = "Test coverage is already comprehensive. The file tests internal helper functions (_isOptimismMintableERC20, _isCorrectTokenPair) and default paused() behavior. Public bridging functions are tested in concrete implementations (L1StandardBridge.t.sol, L2StandardBridge.t.sol). No fuzz conversion opportunities exist as tests cover discrete token type categories, not value ranges." 
diff --git a/ops/ai-eng/contracts-test-maintenance/prompt/prompt.md b/ops/ai-eng/contracts-test-maintenance/prompt/prompt.md index 7ec2c73501f3e..53854e009be19 100644 --- a/ops/ai-eng/contracts-test-maintenance/prompt/prompt.md +++ b/ops/ai-eng/contracts-test-maintenance/prompt/prompt.md @@ -634,7 +634,7 @@ contract L1FeeVault_Version_Test { **INTERPRETING CI STATUS:** - Only investigate actual code failures: build errors, test failures, lint violations - "Code Review Requirements" status = waiting for reviewer approvals, not code issues -- Test-only changes cannot affect these CI jobs - skip them: `diff-asterisc-bytecode`, `op-program-compat` +- Test-only changes cannot affect these CI jobs - skip them: `op-program-compat` **ZERO TOLERANCE - CI FAILURES:** - vm.expectRevert() must ALWAYS have arguments: either selector or bytes message diff --git a/ops/ai-eng/graphite/rules.md b/ops/ai-eng/graphite/rules.md index 4fe2a0a676192..b41e72651b6bd 100644 --- a/ops/ai-eng/graphite/rules.md +++ b/ops/ai-eng/graphite/rules.md @@ -25,6 +25,13 @@ If the PR modifies `OPContractsManagerV2.sol` and changes the `version` constant This section applies to Solidity files ONLY. +### @dev Comments + +- Pay close attention to `@dev` natspec comments in the codebase +- These comments often contain important invariants, requirements, or reminders for developers +- When reviewing changes to a function, check if there are `@dev` comments that specify conditions or actions that must be taken when modifying that code +- Flag violations of instructions in `@dev` comments (e.g., "when updating this function, also update X") + ### Style Guide - Follow the style guide found at `.cursor/rules/solidity-styles.mdc` in the root of this repository. 
diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index f07beb6b4d820..51db686177fbf 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -9,7 +9,7 @@ # It will default to the target platform. ARG TARGET_BASE_IMAGE=alpine:3.20 -# The ubuntu target base image is used for the op-challenger build with kona and asterisc. +# The ubuntu target base image is used for the op-challenger build with kona. ARG UBUNTU_TARGET_BASE_IMAGE=ubuntu:22.04 # The version of kona to use. diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index 6812d7ce551b0..e557d786997d6 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -25,13 +25,15 @@ compilation_restrictions = [ { paths = "src/dispute/v2/FaultDisputeGameV2.sol", optimizer_runs = 5000 }, { paths = "src/dispute/PermissionedDisputeGame.sol", optimizer_runs = 5000 }, { paths = "src/dispute/v2/PermissionedDisputeGameV2.sol", optimizer_runs = 5000 }, - { paths = "src/dispute/zk/OPSuccinctFaultDisputeGame.sol", optimizer_runs = 5000 }, { paths = "src/dispute/SuperFaultDisputeGame.sol", optimizer_runs = 5000 }, { paths = "src/dispute/SuperPermissionedDisputeGame.sol", optimizer_runs = 5000 }, { paths = "src/L1/OPContractsManager.sol", optimizer_runs = 5000 }, { paths = "src/L1/OPContractsManagerStandardValidator.sol", optimizer_runs = 5000 }, { paths = "src/L1/opcm/OPContractsManagerV2.sol", optimizer_runs = 5000 }, { paths = "src/L1/opcm/OPContractsManagerContainer.sol", optimizer_runs = 5000 }, + { paths = "src/L1/opcm/OPContractsManagerMigrator.sol", optimizer_runs = 5000 }, + { paths = "src/L1/opcm/OPContractsManagerUtils.sol", optimizer_runs = 5000 }, + { paths = "src/L1/opcm/OPContractsManagerUtilsCaller.sol", optimizer_runs = 5000 }, { paths = "src/L1/OptimismPortal2.sol", optimizer_runs = 5000 }, { paths = "src/L1/ProtocolVersions.sol", optimizer_runs = 5000 }, { paths = 
"src/universal/StorageSetter.sol", optimizer_runs = 5000 } @@ -160,15 +162,18 @@ compilation_restrictions = [ { paths = "src/dispute/v2/FaultDisputeGameV2.sol", optimizer_runs = 0 }, { paths = "src/dispute/PermissionedDisputeGame.sol", optimizer_runs = 0 }, { paths = "src/dispute/v2/PermissionedDisputeGameV2.sol", optimizer_runs = 0 }, - { paths = "src/dispute/zk/OPSuccinctFaultDisputeGame.sol", optimizer_runs = 0 }, { paths = "src/dispute/SuperFaultDisputeGame.sol", optimizer_runs = 0 }, { paths = "src/dispute/SuperPermissionedDisputeGame.sol", optimizer_runs = 0 }, { paths = "src/L1/OPContractsManager.sol", optimizer_runs = 0 }, { paths = "src/L1/OPContractsManagerStandardValidator.sol", optimizer_runs = 0 }, { paths = "src/L1/opcm/OPContractsManagerV2.sol", optimizer_runs = 0 }, { paths = "src/L1/opcm/OPContractsManagerContainer.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerMigrator.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerUtils.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerUtilsCaller.sol", optimizer_runs = 0 }, { paths = "src/L1/OptimismPortal2.sol", optimizer_runs = 0 }, { paths = "src/L1/ProtocolVersions.sol", optimizer_runs = 0 }, + { paths = "src/universal/StorageSetter.sol", optimizer_runs = 0 } ] ################################################################ diff --git a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol b/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol index 48c7ab9c6b58e..c3a799ecb0e54 100644 --- a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol +++ b/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol @@ -300,6 +300,8 @@ interface IOPContractsManager { error InvalidDevFeatureAccess(bytes32 devFeature); + error OPContractsManager_V2Enabled(); + // -------- Methods -------- function __constructor__( diff --git a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerMigrator.sol 
b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerMigrator.sol new file mode 100644 index 0000000000000..16596493bec92 --- /dev/null +++ b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerMigrator.sol @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +// Libraries +import { GameType, Proposal } from "src/dispute/lib/Types.sol"; + +// Interfaces +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IOPContractsManagerContainer } from "interfaces/L1/opcm/IOPContractsManagerContainer.sol"; +import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; + +interface IOPContractsManagerMigrator { + /// @notice Input for migrating one or more OP Stack chains to use the Super Root dispute games + /// and shared dispute game contracts. + struct MigrateInput { + ISystemConfig[] chainSystemConfigs; + IOPContractsManagerUtils.DisputeGameConfig[] disputeGameConfigs; + Proposal startingAnchorRoot; + GameType startingRespectedGameType; + } + + /// @notice Thrown when a chain's ProxyAdmin owner does not match the other chains. + error OPContractsManagerMigrator_ProxyAdminOwnerMismatch(); + + /// @notice Thrown when a chain's SuperchainConfig does not match the other chains. + error OPContractsManagerMigrator_SuperchainConfigMismatch(); + + /// @notice Thrown when the starting respected game type is not a valid super game type. + error OPContractsManagerMigrator_InvalidStartingRespectedGameType(); + + /// @notice Returns the container of blueprint and implementation contract addresses. + function contractsContainer() external view returns (IOPContractsManagerContainer); + + /// @notice Returns the address of the OPContractsManagerUtils contract. + function opcmUtils() external view returns (IOPContractsManagerUtils); + + /// @notice Migrates one or more OP Stack chains to use the Super Root dispute games and shared + /// dispute game contracts. 
+ /// @param _input The input parameters for the migration. + function migrate(MigrateInput calldata _input) external; + + function __constructor__( + IOPContractsManagerContainer _contractsContainer, + IOPContractsManagerUtils _utils + ) + external; +} diff --git a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerUtils.sol b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerUtils.sol index 3c11d04240f45..a2e6d68c0b4d2 100644 --- a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerUtils.sol +++ b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerUtils.sol @@ -4,6 +4,10 @@ pragma solidity ^0.8.0; import { IOPContractsManagerContainer } from "interfaces/L1/opcm/IOPContractsManagerContainer.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { Claim, GameType } from "src/dispute/lib/Types.sol"; interface IOPContractsManagerUtils { struct ProxyDeployArgs { @@ -18,11 +22,32 @@ interface IOPContractsManagerUtils { bytes data; } + /// @notice Configuration struct for the FaultDisputeGame. + struct FaultDisputeGameConfig { + Claim absolutePrestate; + } + + /// @notice Configuration struct for the PermissionedDisputeGame. + struct PermissionedDisputeGameConfig { + Claim absolutePrestate; + address proposer; + address challenger; + } + + /// @notice Generic dispute game configuration data. 
+ struct DisputeGameConfig { + bool enabled; + uint256 initBond; + GameType gameType; + bytes gameArgs; + } + event ProxyCreation(string name, address proxy); error OPContractsManagerUtils_DowngradeNotAllowed(address _contract); error OPContractsManagerUtils_ConfigLoadFailed(string _name); error OPContractsManagerUtils_ProxyMustLoad(string _name); + error OPContractsManagerUtils_UnsupportedGameType(); error ReservedBitsSet(); error UnsupportedERCVersion(uint8 version); error SemverComp_InvalidSemverParts(); @@ -110,5 +135,17 @@ interface IOPContractsManagerUtils { ) external; + function getGameImpl(GameType _gameType) external view returns (IDisputeGame); + + function makeGameArgs( + uint256 _l2ChainId, + IAnchorStateRegistry _anchorStateRegistry, + IDelayedWETH _delayedWETH, + DisputeGameConfig memory _gcfg + ) + external + view + returns (bytes memory); + function __constructor__(IOPContractsManagerContainer _contractsContainer) external; } diff --git a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerV2.sol b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerV2.sol index 94e5ad3ff7521..13ba23038ebb6 100644 --- a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerV2.sol +++ b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerV2.sol @@ -2,7 +2,7 @@ pragma solidity ^0.8.0; // Libraries -import { Claim, GameType, Proposal } from "src/dispute/lib/Types.sol"; +import { GameType, Proposal } from "src/dispute/lib/Types.sol"; // Interfaces import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; @@ -22,28 +22,9 @@ import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; import { IOPContractsManagerContainer } from "interfaces/L1/opcm/IOPContractsManagerContainer.sol"; import { IOPContractsManagerStandardValidator } from "interfaces/L1/IOPContractsManagerStandardValidator.sol"; import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; +import { 
IOPContractsManagerMigrator } from "interfaces/L1/opcm/IOPContractsManagerMigrator.sol"; interface IOPContractsManagerV2 { - /// @notice Configuration for the FaultDisputeGame. - struct FaultDisputeGameConfig { - Claim absolutePrestate; - } - - /// @notice Configuration for the PermissionedDisputeGame. - struct PermissionedDisputeGameConfig { - Claim absolutePrestate; - address proposer; - address challenger; - } - - /// @notice Dispute game configuration for a specific game type. - struct DisputeGameConfig { - bool enabled; - uint256 initBond; - GameType gameType; - bytes gameArgs; - } - /// @notice Contracts that represent the Superchain system. struct SuperchainContracts { ISuperchainConfig superchainConfig; @@ -80,18 +61,13 @@ interface IOPContractsManagerV2 { uint64 gasLimit; uint256 l2ChainId; IResourceMetering.ResourceConfig resourceConfig; - DisputeGameConfig[] disputeGameConfigs; + IOPContractsManagerUtils.DisputeGameConfig[] disputeGameConfigs; bool useCustomGasToken; } - struct ExtraInstruction { - string key; - bytes data; - } - struct UpgradeInput { ISystemConfig systemConfig; - DisputeGameConfig[] disputeGameConfigs; + IOPContractsManagerUtils.DisputeGameConfig[] disputeGameConfigs; IOPContractsManagerUtils.ExtraInstruction[] extraInstructions; } @@ -103,7 +79,6 @@ interface IOPContractsManagerV2 { error OPContractsManagerV2_InvalidGameConfigs(); error OPContractsManagerV2_InvalidUpgradeInput(); error OPContractsManagerV2_SuperchainConfigNeedsUpgrade(); - error OPContractsManagerV2_UnsupportedGameType(); error OPContractsManagerV2_InvalidUpgradeInstruction(string _key); error OPContractsManagerV2_CannotUpgradeToCustomGasToken(); error OPContractsManagerV2_InvalidUpgradeSequence(string _lastVersion, string _thisVersion); @@ -120,6 +95,7 @@ interface IOPContractsManagerV2 { function __constructor__( IOPContractsManagerContainer _contractsContainer, IOPContractsManagerStandardValidator _standardValidator, + IOPContractsManagerMigrator _migrator, 
IOPContractsManagerUtils _utils ) external; @@ -134,6 +110,8 @@ interface IOPContractsManagerV2 { function opcmV2() external view returns (IOPContractsManagerV2); + function opcmMigrator() external view returns (IOPContractsManagerMigrator); + function opcmUtils() external view returns (IOPContractsManagerUtils); function version() external view returns (string memory); @@ -147,6 +125,10 @@ interface IOPContractsManagerV2 { /// @notice Upgrades contracts on an existing OP Chain per the provided input. function upgrade(UpgradeInput memory _inp) external returns (ChainContracts memory); + /// @notice Migrates one or more OP Stack chains to use the Super Root dispute games and shared + /// dispute game contracts. + function migrate(IOPContractsManagerMigrator.MigrateInput calldata _input) external; + /// @notice Returns whether a development feature is enabled. function isDevFeatureEnabled(bytes32 _feature) external view returns (bool); diff --git a/packages/contracts-bedrock/interfaces/dispute/IDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/IDisputeGame.sol index 85bb61606dea7..cae5a8fc9244e 100644 --- a/packages/contracts-bedrock/interfaces/dispute/IDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IDisputeGame.sol @@ -13,6 +13,7 @@ interface IDisputeGame is IInitializable { function gameType() external view returns (GameType gameType_); function gameCreator() external pure returns (address creator_); function rootClaim() external pure returns (Claim rootClaim_); + function rootClaimByChainId(uint256 _chainId) external pure returns (Claim rootClaim_); function l1Head() external pure returns (Hash l1Head_); function l2SequenceNumber() external pure returns (uint256 l2SequenceNumber_); function extraData() external pure returns (bytes memory extraData_); diff --git a/packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol index 
86ace4d527077..cba277a4a26a6 100644 --- a/packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol @@ -74,6 +74,7 @@ interface IFaultDisputeGame is IDisputeGame { error UnexpectedList(); error UnexpectedRootClaim(Claim rootClaim); error UnexpectedString(); + error UnknownChainId(); error ValidStep(); error InvalidBondDistributionMode(); error GameNotFinalized(); @@ -121,6 +122,7 @@ interface IFaultDisputeGame is IDisputeGame { function move(Claim _disputed, uint256 _challengeIndex, Claim _claim, bool _isAttack) external payable; function normalModeCredit(address) external view returns (uint256); function refundModeCredit(address) external view returns (uint256); + function rootClaimByChainId(uint256 _chainId) external pure returns (Claim rootClaim_); function resolutionCheckpoints(uint256) external view diff --git a/packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol index 788c65790c030..92dc3ddeeec8d 100644 --- a/packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol @@ -63,6 +63,7 @@ interface IPermissionedDisputeGame is IDisputeGame { error UnexpectedList(); error UnexpectedRootClaim(Claim rootClaim); error UnexpectedString(); + error UnknownChainId(); error ValidStep(); error InvalidBondDistributionMode(); error GameNotFinalized(); @@ -111,6 +112,7 @@ interface IPermissionedDisputeGame is IDisputeGame { function move(Claim _disputed, uint256 _challengeIndex, Claim _claim, bool _isAttack) external payable; function normalModeCredit(address) external view returns (uint256); function refundModeCredit(address) external view returns (uint256); + function rootClaimByChainId(uint256 _chainId) external pure returns (Claim rootClaim_); function resolutionCheckpoints(uint256) external view 
diff --git a/packages/contracts-bedrock/interfaces/dispute/ISuperFaultDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/ISuperFaultDisputeGame.sol index 40f7b0e87cabd..314d1b1656e9b 100644 --- a/packages/contracts-bedrock/interfaces/dispute/ISuperFaultDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/ISuperFaultDisputeGame.sol @@ -105,6 +105,7 @@ interface ISuperFaultDisputeGame is IDisputeGame { function normalModeCredit(address) external view returns (uint256); function l2SequenceNumber() external pure returns (uint256 l2SequenceNumber_); function refundModeCredit(address) external view returns (uint256); + function rootClaimByChainId(uint256 _chainId) external pure returns (Claim outputRootClaim_); function resolutionCheckpoints(uint256) external view @@ -121,8 +122,6 @@ interface ISuperFaultDisputeGame is IDisputeGame { function vm() external view returns (IBigStepper vm_); function wasRespectedGameTypeWhenCreated() external view returns (bool); function weth() external view returns (IDelayedWETH weth_); - // TODO(#18516): Remove once IDisputeGame includes this interface - function rootClaimByChainId(uint256 _chainId) external view returns (Claim outputRootClaim_); function __constructor__(GameConstructorParams memory _params) external; } diff --git a/packages/contracts-bedrock/interfaces/dispute/ISuperPermissionedDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/ISuperPermissionedDisputeGame.sol index 06e9d2cbd2519..1af339e77c17b 100644 --- a/packages/contracts-bedrock/interfaces/dispute/ISuperPermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/ISuperPermissionedDisputeGame.sol @@ -103,6 +103,7 @@ interface ISuperPermissionedDisputeGame is IDisputeGame { function normalModeCredit(address) external view returns (uint256); function l2SequenceNumber() external pure returns (uint256 l2SequenceNumber_); function refundModeCredit(address) external view returns (uint256); + function 
rootClaimByChainId(uint256 _chainId) external pure returns (Claim outputRootClaim_); function resolutionCheckpoints(uint256) external view @@ -119,8 +120,6 @@ interface ISuperPermissionedDisputeGame is IDisputeGame { function vm() external view returns (IBigStepper vm_); function wasRespectedGameTypeWhenCreated() external view returns (bool); function weth() external view returns (IDelayedWETH weth_); - // TODO(#18516): Remove once IDisputeGame includes this interface - function rootClaimByChainId(uint256 _chainId) external view returns (Claim outputRootClaim_); function __constructor__(ISuperFaultDisputeGame.GameConstructorParams memory _params) external; } diff --git a/packages/contracts-bedrock/interfaces/dispute/v2/IFaultDisputeGameV2.sol b/packages/contracts-bedrock/interfaces/dispute/v2/IFaultDisputeGameV2.sol index 3b31babbbabf1..35d896a1cb275 100644 --- a/packages/contracts-bedrock/interfaces/dispute/v2/IFaultDisputeGameV2.sol +++ b/packages/contracts-bedrock/interfaces/dispute/v2/IFaultDisputeGameV2.sol @@ -68,6 +68,7 @@ interface IFaultDisputeGameV2 is IDisputeGame { error UnexpectedList(); error UnexpectedRootClaim(Claim rootClaim); error UnexpectedString(); + error UnknownChainId(); error ValidStep(); error InvalidBondDistributionMode(); error GameNotFinalized(); @@ -114,6 +115,7 @@ interface IFaultDisputeGameV2 is IDisputeGame { function move(Claim _disputed, uint256 _challengeIndex, Claim _claim, bool _isAttack) external payable; function normalModeCredit(address) external view returns (uint256); function refundModeCredit(address) external view returns (uint256); + function rootClaimByChainId(uint256 _chainId) external pure returns (Claim rootClaim_); function resolutionCheckpoints(uint256) external view diff --git a/packages/contracts-bedrock/interfaces/dispute/v2/IPermissionedDisputeGameV2.sol b/packages/contracts-bedrock/interfaces/dispute/v2/IPermissionedDisputeGameV2.sol index 7db5da9e2f7fa..52419eea9d3a0 100644 --- 
a/packages/contracts-bedrock/interfaces/dispute/v2/IPermissionedDisputeGameV2.sol +++ b/packages/contracts-bedrock/interfaces/dispute/v2/IPermissionedDisputeGameV2.sol @@ -63,6 +63,7 @@ interface IPermissionedDisputeGameV2 is IDisputeGame { error UnexpectedList(); error UnexpectedRootClaim(Claim rootClaim); error UnexpectedString(); + error UnknownChainId(); error ValidStep(); error InvalidBondDistributionMode(); error GameNotFinalized(); @@ -110,6 +111,7 @@ interface IPermissionedDisputeGameV2 is IDisputeGame { function move(Claim _disputed, uint256 _challengeIndex, Claim _claim, bool _isAttack) external payable; function normalModeCredit(address) external view returns (uint256); function refundModeCredit(address) external view returns (uint256); + function rootClaimByChainId(uint256 _chainId) external pure returns (Claim rootClaim_); function resolutionCheckpoints(uint256) external view diff --git a/packages/contracts-bedrock/interfaces/dispute/zk/IOPSuccinctFaultDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/zk/IOptimisticZkGame.sol similarity index 94% rename from packages/contracts-bedrock/interfaces/dispute/zk/IOPSuccinctFaultDisputeGame.sol rename to packages/contracts-bedrock/interfaces/dispute/zk/IOptimisticZkGame.sol index 9d852b5f76fb9..8ff83c79c8ced 100644 --- a/packages/contracts-bedrock/interfaces/dispute/zk/IOPSuccinctFaultDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/zk/IOptimisticZkGame.sol @@ -18,9 +18,9 @@ import { ISP1Verifier } from "src/dispute/zk/ISP1Verifier.sol"; import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; import { AccessManager } from "src/dispute/zk/AccessManager.sol"; -/// @title IOPSuccinctFaultDisputeGame -/// @notice Interface for the OPSuccinctFaultDisputeGame contract. -interface IOPSuccinctFaultDisputeGame is IDisputeGame, ISemver { +/// @title IOptimisticZkGame +/// @notice Interface for the OptimisticZkGame contract. 
+interface IOptimisticZkGame is IDisputeGame, ISemver { enum ProposalStatus { Unchallenged, Challenged, @@ -72,6 +72,7 @@ interface IOPSuccinctFaultDisputeGame is IDisputeGame, ISemver { function gameType() external view returns (GameType gameType_); function gameCreator() external pure returns (address creator_); function rootClaim() external pure returns (Claim rootClaim_); + function rootClaimByChainId(uint256) external pure returns (Claim rootClaim_); function l1Head() external pure returns (Hash l1Head_); function extraData() external pure returns (bytes memory extraData_); function gameData() external view returns (GameType gameType_, Claim rootClaim_, bytes memory extraData_); diff --git a/packages/contracts-bedrock/interfaces/vendor/asterisc/IRISCV.sol b/packages/contracts-bedrock/interfaces/vendor/asterisc/IRISCV.sol deleted file mode 100644 index d69a22dfb0c45..0000000000000 --- a/packages/contracts-bedrock/interfaces/vendor/asterisc/IRISCV.sol +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -import { ISemver } from "interfaces/universal/ISemver.sol"; -import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; - -/// @title IRISCV -/// @notice Interface for the RISCV contract. -interface IRISCV is ISemver { - function oracle() external view returns (IPreimageOracle); - function step(bytes memory _stateData, bytes memory _proof, bytes32 _localContext) external returns (bytes32); - - function __constructor__(IPreimageOracle _oracle) external; -} diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 94aab76a70bbf..5168203a81099 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -85,20 +85,25 @@ test-dev *ARGS: build-go-ffi FOUNDRY_PROFILE=lite forge test {{ARGS}} # Default block number for the forked upgrade path. 
- -export sepoliaBlockNumber := "9366100" -export mainnetBlockNumber := "23530400" - -export pinnedBlockNumber := if env_var_or_default("FORK_BASE_CHAIN", "") == "mainnet" { - mainnetBlockNumber -} else if env_var_or_default("FORK_BASE_CHAIN", "") == "sepolia" { - sepoliaBlockNumber -} else { - mainnetBlockNumber -} - +# Block numbers are calculated deterministically based on the current week. +# The block is set to approximately Sunday 00:00 UTC of each week. +# This allows for consistent caching while automatically updating weekly. +# +# Note: We use a recipe instead of top-level exports to avoid eager evaluation +# of both chains' block numbers when only one is needed. + +# Calculates and prints the pinned block number for the current ETH_RPC_URL. +# Uses the most recent Sunday at 00:00 UTC as the target timestamp. print-pinned-block-number: - echo $pinnedBlockNumber + #!/usr/bin/env bash + set -euo pipefail + # Calculate most recent Sunday at 00:00 UTC + now=$(date -u +%s) + dow=$(date -u +%w) # 0=Sunday + h=$(date -u +%H); m=$(date -u +%M); s=$(date -u +%S) + secs_since_midnight=$(( 10#$h * 3600 + 10#$m * 60 + 10#$s )) + sunday_midnight=$(( now - secs_since_midnight - dow * 86400 )) + cast find-block "$sunday_midnight" --rpc-url $ETH_RPC_URL # Prepares the environment for upgrade path variant of contract tests and coverage. # Env Vars: @@ -109,7 +114,9 @@ print-pinned-block-number: # when the L1 chain is upgraded. prepare-upgrade-env *ARGS : build-go-ffi #!/bin/bash - export FORK_BLOCK_NUMBER=$pinnedBlockNumber + set -euo pipefail + pinnedBlock=$(just print-pinned-block-number) + export FORK_BLOCK_NUMBER="${FORK_BLOCK_NUMBER:-$pinnedBlock}" echo "Running upgrade tests at block $FORK_BLOCK_NUMBER" export FORK_RPC_URL=$ETH_RPC_URL export FORK_RETRIES=10 @@ -139,8 +146,10 @@ anvil-fork: # Helpful for debugging. 
test-upgrade-against-anvil *ARGS: build-go-ffi #!/bin/bash - echo "Running upgrade tests at block $pinnedBlockNumber" - export FORK_BLOCK_NUMBER=$pinnedBlockNumber + set -euo pipefail + pinnedBlock=$(just print-pinned-block-number) + echo "Running upgrade tests at block $pinnedBlock" + export FORK_BLOCK_NUMBER="${FORK_BLOCK_NUMBER:-$pinnedBlock}" export FORK_RPC_URL=http://127.0.0.1:8545 export FORK_TEST=true forge test {{ARGS}} \ diff --git a/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh b/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh index 6a0389b4f2880..8eb448cbc85e7 100755 --- a/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh +++ b/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh @@ -13,7 +13,6 @@ SEMVER_LOCK="snapshots/semver-lock.json" # Define excluded contracts. EXCLUDED_CONTRACTS=( - "src/vendor/asterisc/RISCV.sol" ) # Helper function to check if a contract is excluded. diff --git a/packages/contracts-bedrock/scripts/checks/valid-semver-check/main.go b/packages/contracts-bedrock/scripts/checks/valid-semver-check/main.go index 76f886c29c47f..119fa6c3c8a10 100644 --- a/packages/contracts-bedrock/scripts/checks/valid-semver-check/main.go +++ b/packages/contracts-bedrock/scripts/checks/valid-semver-check/main.go @@ -13,7 +13,7 @@ import ( func main() { if _, err := common.ProcessFilesGlob( []string{"forge-artifacts/**/*.json"}, - []string{"forge-artifacts/L2StandardBridgeInterop.sol/**.json", "forge-artifacts/OptimismPortalInterop.sol/**.json", "forge-artifacts/RISCV.sol/**.json", "forge-artifacts/EAS.sol/**.json", "forge-artifacts/SchemaRegistry.sol/**.json", "forge-artifacts/L1BlockCGT.sol/**.json", "forge-artifacts/L2ToL1MessagePasserCGT.sol/**.json"}, + []string{"forge-artifacts/L2StandardBridgeInterop.sol/**.json", "forge-artifacts/OptimismPortalInterop.sol/**.json", "forge-artifacts/EAS.sol/**.json", "forge-artifacts/SchemaRegistry.sol/**.json", "forge-artifacts/L1BlockCGT.sol/**.json", 
"forge-artifacts/L2ToL1MessagePasserCGT.sol/**.json"}, processFile, ); err != nil { fmt.Printf("Error: %v/n", err) diff --git a/packages/contracts-bedrock/scripts/deploy/AddGameType.s.sol b/packages/contracts-bedrock/scripts/deploy/AddGameType.s.sol index f4956c6b4bdf5..07579ed32166e 100644 --- a/packages/contracts-bedrock/scripts/deploy/AddGameType.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/AddGameType.s.sol @@ -16,6 +16,8 @@ import { GameType, Duration, Claim } from "src/dispute/lib/Types.sol"; import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; /// @title AddGameType +/// @notice This script is used to add a new game type to the chain using the OPContractsManager V1. +/// Support for OPCM v2 is provided through the UpgradeOPChain script. contract AddGameType is Script { struct Input { // Address that will be used for the DummyCaller contract diff --git a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol index 0ac35465ff89f..ec618a04bd1b7 100644 --- a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol +++ b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol @@ -16,6 +16,7 @@ import { Types } from "scripts/libraries/Types.sol"; import { Blueprint } from "src/libraries/Blueprint.sol"; import { GameTypes } from "src/dispute/lib/Types.sol"; import { Hash } from "src/dispute/lib/Types.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; // Interfaces import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; @@ -35,6 +36,8 @@ import { IMIPS64 } from "interfaces/cannon/IMIPS64.sol"; import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; import { IProxyAdminOwnedBase } from "interfaces/L1/IProxyAdminOwnedBase.sol"; import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IOPContractsManagerV2 } from "interfaces/L1/opcm/IOPContractsManagerV2.sol"; +import { 
IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; library ChainAssertions { Vm internal constant vm = Vm(0x7109709ECfa91a80626fF3989D68f67F5b1DD12D); @@ -109,10 +112,19 @@ library ChainAssertions { require(config.scalar() >> 248 == 1, "CHECK-SCFG-70"); // Depends on start block being set to 0 in `initialize` require(config.startBlock() == block.number, "CHECK-SCFG-140"); - require( - config.batchInbox() == IOPContractsManager(_doi.opcm).chainIdToBatchInboxAddress(_doi.l2ChainId), - "CHECK-SCFG-150" - ); + if (IOPContractsManager(_doi.opcm).isDevFeatureEnabled(DevFeatures.OPCM_V2)) { + require( + config.batchInbox() + == IOPContractsManagerUtils(IOPContractsManagerV2(address(_doi.opcm)).opcmUtils()) + .chainIdToBatchInboxAddress(_doi.l2ChainId), + "CHECK-SCFG-150" + ); + } else { + require( + config.batchInbox() == IOPContractsManager(_doi.opcm).chainIdToBatchInboxAddress(_doi.l2ChainId), + "CHECK-SCFG-150" + ); + } // Check _addresses require(config.l1CrossDomainMessenger() == _contracts.L1CrossDomainMessenger, "CHECK-SCFG-160"); require(config.l1ERC721Bridge() == _contracts.L1ERC721Bridge, "CHECK-SCFG-170"); @@ -385,9 +397,10 @@ library ChainAssertions { require(address(_opcm) != address(0), "CHECK-OPCM-10"); require(bytes(_opcm.version()).length > 0, "CHECK-OPCM-15"); - require(address(_opcm.protocolVersions()) == _proxies.ProtocolVersions, "CHECK-OPCM-17"); - require(address(_opcm.superchainConfig()) == _proxies.SuperchainConfig, "CHECK-OPCM-19"); - + if (!_opcm.isDevFeatureEnabled(DevFeatures.OPCM_V2)) { + require(address(_opcm.protocolVersions()) == _proxies.ProtocolVersions, "CHECK-OPCM-17"); + require(address(_opcm.superchainConfig()) == _proxies.SuperchainConfig, "CHECK-OPCM-19"); + } // Ensure that the OPCM impls are correctly saved IOPContractsManager.Implementations memory impls = _opcm.implementations(); require(impls.l1ERC721BridgeImpl == _impls.L1ERC721Bridge, "CHECK-OPCM-50"); diff --git 
a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 3467c24dfffc9..72f4b6db35f8c 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -29,6 +29,7 @@ import { DevFeatures } from "src/libraries/DevFeatures.sol"; // Interfaces import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; import { IOPContractsManagerV2 } from "interfaces/L1/opcm/IOPContractsManagerV2.sol"; +import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; import { IProxy } from "interfaces/universal/IProxy.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; @@ -301,7 +302,7 @@ contract Deploy is Deployer { // Save the implementation addresses which are needed outside of this function or script. // When called in a fork test, this will overwrite the existing implementations. 
artifacts.save("MipsSingleton", address(dio.mipsSingleton)); - if (DevFeatures.isDevFeatureEnabled(dio.opcm.devFeatureBitmap(), DevFeatures.OPCM_V2)) { + if (DevFeatures.isDevFeatureEnabled(cfg.devFeatureBitmap(), DevFeatures.OPCM_V2)) { artifacts.save("OPContractsManagerV2", address(dio.opcmV2)); } else { artifacts.save("OPContractsManager", address(dio.opcm)); @@ -336,10 +337,16 @@ contract Deploy is Deployer { _mips: IMIPS64(address(dio.mipsSingleton)), _oracle: IPreimageOracle(address(dio.preimageOracleSingleton)) }); + IOPContractsManager _opcm; + if (DevFeatures.isDevFeatureEnabled(cfg.devFeatureBitmap(), DevFeatures.OPCM_V2)) { + _opcm = IOPContractsManager(address(dio.opcmV2)); + } else { + _opcm = IOPContractsManager(address(dio.opcm)); + } ChainAssertions.checkOPContractsManager({ _impls: impls, _proxies: _proxies(), - _opcm: IOPContractsManager(address(dio.opcm)), + _opcm: _opcm, _mips: IMIPS64(address(dio.mipsSingleton)) }); ChainAssertions.checkSystemConfigImpls(impls); @@ -490,36 +497,36 @@ contract Deploy is Deployer { } function getDeployInputV2() public view returns (IOPContractsManagerV2.FullConfig memory) { - IOPContractsManagerV2.DisputeGameConfig[] memory disputeGameConfigs = - new IOPContractsManagerV2.DisputeGameConfig[](3); - disputeGameConfigs[0] = IOPContractsManagerV2.DisputeGameConfig({ + IOPContractsManagerUtils.DisputeGameConfig[] memory disputeGameConfigs = + new IOPContractsManagerUtils.DisputeGameConfig[](3); + disputeGameConfigs[0] = IOPContractsManagerUtils.DisputeGameConfig({ enabled: false, initBond: 0, gameType: GameTypes.CANNON, gameArgs: abi.encode( - IOPContractsManagerV2.FaultDisputeGameConfig({ + IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())) }) ) }); - disputeGameConfigs[1] = IOPContractsManagerV2.DisputeGameConfig({ + disputeGameConfigs[1] = IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, initBond: 0, gameType: 
GameTypes.PERMISSIONED_CANNON, gameArgs: abi.encode( - IOPContractsManagerV2.PermissionedDisputeGameConfig({ + IOPContractsManagerUtils.PermissionedDisputeGameConfig({ absolutePrestate: Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())), proposer: cfg.l2OutputOracleProposer(), challenger: cfg.l2OutputOracleChallenger() }) ) }); - disputeGameConfigs[2] = IOPContractsManagerV2.DisputeGameConfig({ + disputeGameConfigs[2] = IOPContractsManagerUtils.DisputeGameConfig({ enabled: false, initBond: 0, gameType: GameTypes.CANNON_KONA, gameArgs: abi.encode( - IOPContractsManagerV2.FaultDisputeGameConfig({ + IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: Claim.wrap(bytes32(cfg.faultGameAbsolutePrestate())) }) ) diff --git a/packages/contracts-bedrock/scripts/deploy/DeployAsterisc.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployAsterisc.s.sol deleted file mode 100644 index ba55b674cffce..0000000000000 --- a/packages/contracts-bedrock/scripts/deploy/DeployAsterisc.s.sol +++ /dev/null @@ -1,57 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -// Forge -import { Script } from "forge-std/Script.sol"; - -// Scripts -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; - -// Interfaces -import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; -import { IRISCV } from "interfaces/vendor/asterisc/IRISCV.sol"; - -/// @title DeployAsterisc -contract DeployAsterisc is Script { - struct Input { - IPreimageOracle preimageOracle; - } - - struct Output { - IRISCV asteriscSingleton; - } - - function run(Input memory _input) public returns (Output memory output_) { - assertValidInput(_input); - - deployAsteriscSingleton(_input, output_); - - assertValidOutput(_input, output_); - } - - function deployAsteriscSingleton(Input memory _input, Output memory _output) internal { - vm.broadcast(msg.sender); - IRISCV singleton = IRISCV( - DeployUtils.create1({ - _name: "RISCV", - _args: 
DeployUtils.encodeConstructor(abi.encodeCall(IRISCV.__constructor__, (_input.preimageOracle))) - }) - ); - - vm.label(address(singleton), "AsteriscSingleton"); - _output.asteriscSingleton = singleton; - } - - function assertValidInput(Input memory _input) internal pure { - require(address(_input.preimageOracle) != address(0), "DeployAsterisc: preimageOracle not set"); - } - - function assertValidOutput(Input memory _input, Output memory _output) internal view { - DeployUtils.assertValidContractAddress(address(_output.asteriscSingleton)); - - require( - _output.asteriscSingleton.oracle() == _input.preimageOracle, - "DeployAsterisc: preimageOracle does not match input" - ); - } -} diff --git a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol index d6d660ff64bc3..c65048e860092 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol @@ -32,6 +32,7 @@ import { import { IOPContractsManagerV2 } from "interfaces/L1/opcm/IOPContractsManagerV2.sol"; import { IOPContractsManagerContainer } from "interfaces/L1/opcm/IOPContractsManagerContainer.sol"; import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; +import { IOPContractsManagerMigrator } from "interfaces/L1/opcm/IOPContractsManagerMigrator.sol"; import { IOptimismPortal2 as IOptimismPortal } from "interfaces/L1/IOptimismPortal2.sol"; import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; @@ -79,6 +80,7 @@ contract DeployImplementations is Script { IOPContractsManagerInteropMigrator opcmInteropMigrator; IOPContractsManagerStandardValidator opcmStandardValidator; IOPContractsManagerUtils opcmUtils; + IOPContractsManagerMigrator opcmMigrator; IOPContractsManagerV2 opcmV2; IOPContractsManagerContainer 
opcmContainer; // v2 container IDelayedWETH delayedWETHImpl; @@ -179,7 +181,41 @@ contract DeployImplementations is Script { superPermissionedDisputeGameImpl: address(_output.superPermissionedDisputeGameImpl) }); - IOPContractsManagerContainer.Implementations memory implementationsV2 = IOPContractsManagerContainer + // Deploy OPCM V1 components + deployOPCMBPImplsContainer(_input, _output, _blueprints, implementations); + deployOPCMGameTypeAdder(_output); + deployOPCMDeployer(_input, _output); + deployOPCMUpgrader(_output); + deployOPCMInteropMigrator(_output); + deployOPCMStandardValidator(_input, _output, implementations); + + // Semgrep rule will fail because the arguments are encoded inside of a separate function. + opcm_ = IOPContractsManager( + // nosemgrep: sol-safety-deployutils-args + DeployUtils.createDeterministic({ + _name: "OPContractsManager", + _args: encodeOPCMConstructor(_input, _output), + _salt: _salt + }) + ); + + vm.label(address(opcm_), "OPContractsManager"); + _output.opcm = opcm_; + + // Set OPCM V2 addresses to zero (not deployed) + _output.opcmV2 = IOPContractsManagerV2(address(0)); + _output.opcmContainer = IOPContractsManagerContainer(address(0)); + } + + function createOPCMContractV2( + Input memory _input, + Output memory _output, + IOPContractsManager.Blueprints memory _blueprints + ) + private + returns (IOPContractsManagerV2 opcmV2_) + { + IOPContractsManagerContainer.Implementations memory implementations = IOPContractsManagerContainer .Implementations({ superchainConfigImpl: address(_output.superchainConfigImpl), protocolVersionsImpl: address(_output.protocolVersionsImpl), @@ -203,7 +239,7 @@ contract DeployImplementations is Script { }); // Convert blueprints to V2 blueprints - IOPContractsManagerContainer.Blueprints memory blueprintsV2 = IOPContractsManagerContainer.Blueprints({ + IOPContractsManagerContainer.Blueprints memory blueprints = IOPContractsManagerContainer.Blueprints({ addressManager: _blueprints.addressManager, 
proxy: _blueprints.proxy, proxyAdmin: _blueprints.proxyAdmin, @@ -211,28 +247,22 @@ contract DeployImplementations is Script { resolvedDelegateProxy: _blueprints.resolvedDelegateProxy }); - deployOPCMBPImplsContainer(_input, _output, _blueprints, implementations); - deployOPCMContainer(_input, _output, blueprintsV2, implementationsV2); - deployOPCMGameTypeAdder(_output); - deployOPCMDeployer(_input, _output); - deployOPCMUpgrader(_output); - deployOPCMInteropMigrator(_output); - deployOPCMStandardValidator(_input, _output, implementations); + // Deploy OPCM V2 components + deployOPCMContainer(_input, _output, blueprints, implementations); + deployOPCMStandardValidatorV2(_input, _output, implementations); deployOPCMUtils(_output); - deployOPCMV2(_output); - - // Semgrep rule will fail because the arguments are encoded inside of a separate function. - opcm_ = IOPContractsManager( - // nosemgrep: sol-safety-deployutils-args - DeployUtils.createDeterministic({ - _name: "OPContractsManager", - _args: encodeOPCMConstructor(_input, _output), - _salt: _salt - }) - ); - - vm.label(address(opcm_), "OPContractsManager"); - _output.opcm = opcm_; + deployOPCMMigrator(_output); + opcmV2_ = deployOPCMV2(_output); + + // Set OPCM V1 addresses to zero (not deployed) + _output.opcm = IOPContractsManager(address(0)); + _output.opcmContractsContainer = IOPContractsManagerContractsContainer(address(0)); + _output.opcmGameTypeAdder = IOPContractsManagerGameTypeAdder(address(0)); + _output.opcmDeployer = IOPContractsManagerDeployer(address(0)); + _output.opcmUpgrader = IOPContractsManagerUpgrader(address(0)); + _output.opcmInteropMigrator = IOPContractsManagerInteropMigrator(address(0)); + + return opcmV2_; } /// @notice Encodes the constructor of the OPContractsManager contract. 
Used to avoid stack too @@ -283,10 +313,18 @@ contract DeployImplementations is Script { // forgefmt: disable-end vm.stopBroadcast(); - IOPContractsManager opcm = createOPCMContract(_input, _output, blueprints); - - vm.label(address(opcm), "OPContractsManager"); - _output.opcm = opcm; + // Check if OPCM V2 should be deployed + bool deployV2 = DevFeatures.isDevFeatureEnabled(_input.devFeatureBitmap, DevFeatures.OPCM_V2); + + if (deployV2) { + IOPContractsManagerV2 opcmV2 = createOPCMContractV2(_input, _output, blueprints); + vm.label(address(opcmV2), "OPContractsManagerV2"); + _output.opcmV2 = opcmV2; + } else { + IOPContractsManager opcm = createOPCMContract(_input, _output, blueprints); + vm.label(address(opcm), "OPContractsManager"); + _output.opcm = opcm; + } } // --- Core Contracts --- @@ -769,21 +807,80 @@ contract DeployImplementations is Script { _output.opcmUtils = impl; } - function deployOPCMV2(Output memory _output) private { - IOPContractsManagerV2 impl = IOPContractsManagerV2( + function deployOPCMMigrator(Output memory _output) private { + IOPContractsManagerMigrator impl = IOPContractsManagerMigrator( + DeployUtils.createDeterministic({ + _name: "OPContractsManagerMigrator.sol:OPContractsManagerMigrator", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IOPContractsManagerMigrator.__constructor__, (_output.opcmContainer, _output.opcmUtils)) + ), + _salt: _salt + }) + ); + vm.label(address(impl), "OPContractsManagerMigratorImpl"); + _output.opcmMigrator = impl; + } + + function deployOPCMStandardValidatorV2( + Input memory _input, + Output memory _output, + IOPContractsManagerContainer.Implementations memory _implementations + ) + private + { + IOPContractsManagerStandardValidator.Implementations memory opcmImplementations; + opcmImplementations.l1ERC721BridgeImpl = _implementations.l1ERC721BridgeImpl; + opcmImplementations.optimismPortalImpl = _implementations.optimismPortalImpl; + opcmImplementations.optimismPortalInteropImpl = 
_implementations.optimismPortalInteropImpl; + opcmImplementations.ethLockboxImpl = _implementations.ethLockboxImpl; + opcmImplementations.systemConfigImpl = _implementations.systemConfigImpl; + opcmImplementations.optimismMintableERC20FactoryImpl = _implementations.optimismMintableERC20FactoryImpl; + opcmImplementations.l1CrossDomainMessengerImpl = _implementations.l1CrossDomainMessengerImpl; + opcmImplementations.l1StandardBridgeImpl = _implementations.l1StandardBridgeImpl; + opcmImplementations.disputeGameFactoryImpl = _implementations.disputeGameFactoryImpl; + opcmImplementations.anchorStateRegistryImpl = _implementations.anchorStateRegistryImpl; + opcmImplementations.delayedWETHImpl = _implementations.delayedWETHImpl; + opcmImplementations.mipsImpl = _implementations.mipsImpl; + opcmImplementations.faultDisputeGameImpl = _implementations.faultDisputeGameV2Impl; + opcmImplementations.permissionedDisputeGameImpl = _implementations.permissionedDisputeGameV2Impl; + + IOPContractsManagerStandardValidator impl = IOPContractsManagerStandardValidator( + DeployUtils.createDeterministic({ + _name: "OPContractsManagerStandardValidator.sol:OPContractsManagerStandardValidator", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IOPContractsManagerStandardValidator.__constructor__, + ( + opcmImplementations, + _input.superchainConfigProxy, + _input.l1ProxyAdminOwner, + _input.challenger, + _input.withdrawalDelaySeconds, + _input.devFeatureBitmap + ) + ) + ), + _salt: _salt + }) + ); + vm.label(address(impl), "OPContractsManagerStandardValidatorImpl"); + _output.opcmStandardValidator = impl; + } + + function deployOPCMV2(Output memory _output) private returns (IOPContractsManagerV2 opcmV2_) { + opcmV2_ = IOPContractsManagerV2( DeployUtils.createDeterministic({ - _name: "OPContractsManagerV2.sol:OPContractsManagerV2", + _name: "OPContractsManagerV2", _args: DeployUtils.encodeConstructor( abi.encodeCall( IOPContractsManagerV2.__constructor__, - (_output.opcmContainer, 
_output.opcmStandardValidator, _output.opcmUtils) + (_output.opcmContainer, _output.opcmStandardValidator, _output.opcmMigrator, _output.opcmUtils) ) ), _salt: _salt }) ); - vm.label(address(impl), "OPContractsManagerV2Impl"); - _output.opcmV2 = impl; + vm.label(address(opcmV2_), "OPContractsManagerV2"); } function deployStorageSetterImpl(Output memory _output) private { @@ -851,8 +948,12 @@ contract DeployImplementations is Script { function assertValidOutput(Input memory _input, Output memory _output) private { // With 12 addresses, we'd get a stack too deep error if we tried to do this inline as a // single call to `Solarray.addresses`. So we split it into two calls. + + // Check which OPCM version was deployed + bool deployedV2 = DevFeatures.isDevFeatureEnabled(_input.devFeatureBitmap, DevFeatures.OPCM_V2); + address[] memory addrs1 = Solarray.addresses( - address(_output.opcm), + deployedV2 ? address(_output.opcmV2) : address(_output.opcm), address(_output.optimismPortalImpl), address(_output.delayedWETHImpl), address(_output.preimageOracleSingleton), @@ -883,6 +984,27 @@ contract DeployImplementations is Script { DeployUtils.assertValidContractAddresses(Solarray.extend(addrs1, addrs2)); + // Validate OPCM V2 flag + if (DevFeatures.isDevFeatureEnabled(_input.devFeatureBitmap, DevFeatures.OPCM_V2)) { + require( + address(_output.opcmV2) != address(0), + "DeployImplementations: OPCM V2 flag enabled but OPCM V2 not deployed" + ); + require( + address(_output.opcm) == address(0), + "DeployImplementations: OPCM V2 flag enabled but OPCM V1 was deployed" + ); + } else { + require( + address(_output.opcm) != address(0), + "DeployImplementations: OPCM V2 flag disabled but OPCM V1 not deployed" + ); + require( + address(_output.opcmV2) == address(0), + "DeployImplementations: OPCM V2 flag disabled but OPCM V2 was deployed" + ); + } + if (!DevFeatures.isDevFeatureEnabled(_input.devFeatureBitmap, DevFeatures.OPTIMISM_PORTAL_INTEROP)) { require( 
address(_output.superFaultDisputeGameImpl) == address(0), @@ -909,15 +1031,18 @@ contract DeployImplementations is Script { ChainAssertions.checkL1StandardBridgeImpl(_output.l1StandardBridgeImpl); ChainAssertions.checkMIPS(_output.mipsSingleton, _output.preimageOracleSingleton); - Types.ContractSet memory proxies; - proxies.SuperchainConfig = address(_input.superchainConfigProxy); - proxies.ProtocolVersions = address(_input.protocolVersionsProxy); - ChainAssertions.checkOPContractsManager({ - _impls: impls, - _proxies: proxies, - _opcm: IOPContractsManager(address(_output.opcm)), - _mips: IMIPS64(address(_output.mipsSingleton)) - }); + // Only check OPCM V1 if it was deployed + if (!DevFeatures.isDevFeatureEnabled(_input.devFeatureBitmap, DevFeatures.OPCM_V2)) { + Types.ContractSet memory proxies; + proxies.SuperchainConfig = address(_input.superchainConfigProxy); + proxies.ProtocolVersions = address(_input.protocolVersionsProxy); + ChainAssertions.checkOPContractsManager({ + _impls: impls, + _proxies: proxies, + _opcm: IOPContractsManager(address(_output.opcm)), + _mips: IMIPS64(address(_output.mipsSingleton)) + }); + } ChainAssertions.checkOptimismMintableERC20FactoryImpl(_output.optimismMintableERC20FactoryImpl); ChainAssertions.checkOptimismPortal2({ diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol index 2f5ff24e752da..62f06706e4a73 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol @@ -3,6 +3,8 @@ pragma solidity 0.8.15; import { Script } from "forge-std/Script.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; +import { Constants } from "src/libraries/Constants.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Solarray } from "scripts/libraries/Solarray.sol"; import { ChainAssertions } from "scripts/deploy/ChainAssertions.sol"; @@ 
-11,6 +13,8 @@ import { Types } from "scripts/libraries/Types.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; +import { IOPContractsManagerV2 } from "interfaces/L1/opcm/IOPContractsManagerV2.sol"; +import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; @@ -24,7 +28,7 @@ import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; -import { IOPContractsManager } from "../../interfaces/L1/IOPContractsManager.sol"; +import { GameTypes } from "src/dispute/lib/Types.sol"; contract DeployOPChain is Script { struct Output { @@ -54,8 +58,66 @@ contract DeployOPChain is Script { function run(Types.DeployOPChainInput memory _input) public returns (Output memory output_) { checkInput(_input); - IOPContractsManager opcm = IOPContractsManager(_input.opcm); + // Check if OPCM v2 should be used. 
+ bool useV2 = isDevFeatureOpcmV2Enabled(_input.opcm); + if (useV2) { + IOPContractsManagerV2 opcmV2 = IOPContractsManagerV2(_input.opcm); + IOPContractsManagerV2.FullConfig memory config = toOPCMV2DeployInput(_input); + + vm.broadcast(msg.sender); + IOPContractsManagerV2.ChainContracts memory chainContracts = opcmV2.deploy(config); + output_ = fromOPCMV2OutputToOutput(chainContracts); + } else { + IOPContractsManager opcm = IOPContractsManager(_input.opcm); + IOPContractsManager.DeployInput memory deployInput = toOPCMV1DeployInput(_input); + + vm.broadcast(msg.sender); + IOPContractsManager.DeployOutput memory deployOutput = opcm.deploy(deployInput); + + output_ = fromOPCMV1OutputToOutput(deployOutput); + } + + checkOutput(_input, output_); + + vm.label(address(output_.opChainProxyAdmin), "opChainProxyAdmin"); + vm.label(address(output_.addressManager), "addressManager"); + vm.label(address(output_.l1ERC721BridgeProxy), "l1ERC721BridgeProxy"); + vm.label(address(output_.systemConfigProxy), "systemConfigProxy"); + vm.label(address(output_.optimismMintableERC20FactoryProxy), "optimismMintableERC20FactoryProxy"); + vm.label(address(output_.l1StandardBridgeProxy), "l1StandardBridgeProxy"); + vm.label(address(output_.l1CrossDomainMessengerProxy), "l1CrossDomainMessengerProxy"); + vm.label(address(output_.optimismPortalProxy), "optimismPortalProxy"); + vm.label(address(output_.ethLockboxProxy), "ethLockboxProxy"); + vm.label(address(output_.disputeGameFactoryProxy), "disputeGameFactoryProxy"); + vm.label(address(output_.anchorStateRegistryProxy), "anchorStateRegistryProxy"); + vm.label(address(output_.delayedWETHPermissionedGameProxy), "delayedWETHPermissionedGameProxy"); + // TODO: Eventually switch from Permissioned to Permissionless. 
+ // vm.label(address(output_.faultDisputeGame), "faultDisputeGame"); + // vm.label(address(output_.delayedWETHPermissionlessGameProxy), "delayedWETHPermissionlessGameProxy"); + } + + // -------- Features -------- + + /// @notice Checks if OPCM v2 dev feature flag is enabled from the contract's dev feature bitmap. + function isDevFeatureOpcmV2Enabled(address _opcmAddr) internal view returns (bool) { + // Both v1 and v2 share the same interface for this function. + return IOPContractsManager(_opcmAddr).isDevFeatureEnabled(DevFeatures.OPCM_V2); + } + + function isDevFeatureV2DisputeGamesEnabled(address _opcmAddr) internal view returns (bool) { + IOPContractsManager opcm = IOPContractsManager(_opcmAddr); + return DevFeatures.isDevFeatureEnabled(opcm.devFeatureBitmap(), DevFeatures.DEPLOY_V2_DISPUTE_GAMES); + } + + /// @notice Converts Types.DeployOPChainInput to IOPContractsManager.DeployInput. + /// @param _input The input parameters. + /// @return deployInput_ The deployed input parameters. 
+ function toOPCMV1DeployInput(Types.DeployOPChainInput memory _input) + internal + pure + returns (IOPContractsManager.DeployInput memory deployInput_) + { IOPContractsManager.Roles memory roles = IOPContractsManager.Roles({ opChainProxyAdminOwner: _input.opChainProxyAdminOwner, systemConfigOwner: _input.systemConfigOwner, @@ -64,7 +126,7 @@ contract DeployOPChain is Script { proposer: _input.proposer, challenger: _input.challenger }); - IOPContractsManager.DeployInput memory deployInput = IOPContractsManager.DeployInput({ + deployInput_ = IOPContractsManager.DeployInput({ roles: roles, basefeeScalar: _input.basefeeScalar, blobBasefeeScalar: _input.blobBaseFeeScalar, @@ -80,46 +142,133 @@ contract DeployOPChain is Script { disputeMaxClockDuration: _input.disputeMaxClockDuration, useCustomGasToken: _input.useCustomGasToken }); + } - vm.broadcast(msg.sender); - IOPContractsManager.DeployOutput memory deployOutput = opcm.deploy(deployInput); - - vm.label(address(deployOutput.opChainProxyAdmin), "opChainProxyAdmin"); - vm.label(address(deployOutput.addressManager), "addressManager"); - vm.label(address(deployOutput.l1ERC721BridgeProxy), "l1ERC721BridgeProxy"); - vm.label(address(deployOutput.systemConfigProxy), "systemConfigProxy"); - vm.label(address(deployOutput.optimismMintableERC20FactoryProxy), "optimismMintableERC20FactoryProxy"); - vm.label(address(deployOutput.l1StandardBridgeProxy), "l1StandardBridgeProxy"); - vm.label(address(deployOutput.l1CrossDomainMessengerProxy), "l1CrossDomainMessengerProxy"); - vm.label(address(deployOutput.optimismPortalProxy), "optimismPortalProxy"); - vm.label(address(deployOutput.ethLockboxProxy), "ethLockboxProxy"); - vm.label(address(deployOutput.disputeGameFactoryProxy), "disputeGameFactoryProxy"); - vm.label(address(deployOutput.anchorStateRegistryProxy), "anchorStateRegistryProxy"); - vm.label(address(deployOutput.permissionedDisputeGame), "permissionedDisputeGame"); - 
vm.label(address(deployOutput.delayedWETHPermissionedGameProxy), "delayedWETHPermissionedGameProxy"); - // TODO: Eventually switch from Permissioned to Permissionless. - // vm.label(address(deployOutput.faultDisputeGame), "faultDisputeGame"); - // vm.label(address(deployOutput.delayedWETHPermissionlessGameProxy), "delayedWETHPermissionlessGameProxy"); + /// @notice Converts Types.DeployOPChainInput to IOPContractsManagerV2.FullConfig. + /// @param _input The input parameters. + /// @return config_ The deployed input parameters. + function toOPCMV2DeployInput(Types.DeployOPChainInput memory _input) + internal + pure + returns (IOPContractsManagerV2.FullConfig memory config_) + { + // Build dispute game configs - OPCMV2 requires exactly 3 configs: CANNON, PERMISSIONED_CANNON, CANNON_KONA + IOPContractsManagerUtils.DisputeGameConfig[] memory disputeGameConfigs = + new IOPContractsManagerUtils.DisputeGameConfig[](3); + + // Determine which games should be enabled based on the starting respected game type + bool cannonEnabled = _input.disputeGameType.raw() == GameTypes.CANNON.raw(); + bool permissionedCannonEnabled = true; // PERMISSIONED_CANNON must always be enabled + bool cannonKonaEnabled = _input.disputeGameType.raw() == GameTypes.CANNON_KONA.raw(); + + // Config 0: CANNON + IOPContractsManagerUtils.FaultDisputeGameConfig memory cannonConfig = + IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: _input.disputeAbsolutePrestate }); + + disputeGameConfigs[0] = IOPContractsManagerUtils.DisputeGameConfig({ + enabled: cannonEnabled, + initBond: cannonEnabled ? 
0.08 ether : 0, // Standard init bond if enabled + gameType: GameTypes.CANNON, + gameArgs: abi.encode(cannonConfig) + }); + + // Config 1: PERMISSIONED_CANNON (must be enabled) + IOPContractsManagerUtils.PermissionedDisputeGameConfig memory pdgConfig = IOPContractsManagerUtils + .PermissionedDisputeGameConfig({ + absolutePrestate: _input.disputeAbsolutePrestate, + proposer: _input.proposer, + challenger: _input.challenger + }); + disputeGameConfigs[1] = IOPContractsManagerUtils.DisputeGameConfig({ + enabled: permissionedCannonEnabled, + initBond: 0.08 ether, // Standard init bond + gameType: GameTypes.PERMISSIONED_CANNON, + gameArgs: abi.encode(pdgConfig) + }); + + // Config 2: CANNON_KONA + IOPContractsManagerUtils.FaultDisputeGameConfig memory cannonKonaConfig = + IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: _input.disputeAbsolutePrestate }); + + disputeGameConfigs[2] = IOPContractsManagerUtils.DisputeGameConfig({ + enabled: cannonKonaEnabled, + initBond: cannonKonaEnabled ? 0.08 ether : 0, // Standard init bond if enabled + gameType: GameTypes.CANNON_KONA, + gameArgs: abi.encode(cannonKonaConfig) + }); + + config_ = IOPContractsManagerV2.FullConfig({ + saltMixer: _input.saltMixer, + superchainConfig: _input.superchainConfig, + proxyAdminOwner: _input.opChainProxyAdminOwner, + systemConfigOwner: _input.systemConfigOwner, + unsafeBlockSigner: _input.unsafeBlockSigner, + batcher: _input.batcher, + startingAnchorRoot: ScriptConstants.DEFAULT_OUTPUT_ROOT(), + startingRespectedGameType: _input.disputeGameType, + basefeeScalar: _input.basefeeScalar, + blobBasefeeScalar: _input.blobBaseFeeScalar, + gasLimit: _input.gasLimit, + l2ChainId: _input.l2ChainId, + resourceConfig: Constants.DEFAULT_RESOURCE_CONFIG(), + disputeGameConfigs: disputeGameConfigs, + useCustomGasToken: _input.useCustomGasToken + }); + } + + /// @notice Converts IOPContractsManagerV2.ChainContracts to Output. + /// @param _chainContracts The chain contracts. 
+ /// @return output_ The output parameters. + function fromOPCMV2OutputToOutput(IOPContractsManagerV2.ChainContracts memory _chainContracts) + internal + pure + returns (Output memory output_) + { output_ = Output({ - opChainProxyAdmin: deployOutput.opChainProxyAdmin, - addressManager: deployOutput.addressManager, - l1ERC721BridgeProxy: deployOutput.l1ERC721BridgeProxy, - systemConfigProxy: deployOutput.systemConfigProxy, - optimismMintableERC20FactoryProxy: deployOutput.optimismMintableERC20FactoryProxy, - l1StandardBridgeProxy: deployOutput.l1StandardBridgeProxy, - l1CrossDomainMessengerProxy: deployOutput.l1CrossDomainMessengerProxy, - optimismPortalProxy: deployOutput.optimismPortalProxy, - ethLockboxProxy: deployOutput.ethLockboxProxy, - disputeGameFactoryProxy: deployOutput.disputeGameFactoryProxy, - anchorStateRegistryProxy: deployOutput.anchorStateRegistryProxy, - faultDisputeGame: deployOutput.faultDisputeGame, - permissionedDisputeGame: deployOutput.permissionedDisputeGame, - delayedWETHPermissionedGameProxy: deployOutput.delayedWETHPermissionedGameProxy, - delayedWETHPermissionlessGameProxy: deployOutput.delayedWETHPermissionlessGameProxy + opChainProxyAdmin: _chainContracts.proxyAdmin, + addressManager: _chainContracts.addressManager, + l1ERC721BridgeProxy: _chainContracts.l1ERC721Bridge, + systemConfigProxy: _chainContracts.systemConfig, + optimismMintableERC20FactoryProxy: _chainContracts.optimismMintableERC20Factory, + l1StandardBridgeProxy: _chainContracts.l1StandardBridge, + l1CrossDomainMessengerProxy: _chainContracts.l1CrossDomainMessenger, + optimismPortalProxy: _chainContracts.optimismPortal, + ethLockboxProxy: _chainContracts.ethLockbox, + disputeGameFactoryProxy: _chainContracts.disputeGameFactory, + anchorStateRegistryProxy: _chainContracts.anchorStateRegistry, + faultDisputeGame: IFaultDisputeGame(address(0)), + permissionedDisputeGame: IPermissionedDisputeGame(address(0)), + delayedWETHPermissionedGameProxy: _chainContracts.delayedWETH, + 
delayedWETHPermissionlessGameProxy: IDelayedWETH(payable(address(0))) }); + } - checkOutput(_input, output_); + /// @notice Converts IOPContractsManager.DeployOutput to Output. + /// @param _deployOutput The deploy output. + /// @return output_ The output parameters. + function fromOPCMV1OutputToOutput(IOPContractsManager.DeployOutput memory _deployOutput) + internal + pure + returns (Output memory output_) + { + output_ = Output({ + opChainProxyAdmin: _deployOutput.opChainProxyAdmin, + addressManager: _deployOutput.addressManager, + l1ERC721BridgeProxy: _deployOutput.l1ERC721BridgeProxy, + systemConfigProxy: _deployOutput.systemConfigProxy, + optimismMintableERC20FactoryProxy: _deployOutput.optimismMintableERC20FactoryProxy, + l1StandardBridgeProxy: _deployOutput.l1StandardBridgeProxy, + l1CrossDomainMessengerProxy: _deployOutput.l1CrossDomainMessengerProxy, + optimismPortalProxy: _deployOutput.optimismPortalProxy, + ethLockboxProxy: _deployOutput.ethLockboxProxy, + disputeGameFactoryProxy: _deployOutput.disputeGameFactoryProxy, + anchorStateRegistryProxy: _deployOutput.anchorStateRegistryProxy, + faultDisputeGame: _deployOutput.faultDisputeGame, + permissionedDisputeGame: _deployOutput.permissionedDisputeGame, + delayedWETHPermissionedGameProxy: _deployOutput.delayedWETHPermissionedGameProxy, + delayedWETHPermissionlessGameProxy: _deployOutput.delayedWETHPermissionlessGameProxy + }); } // -------- Validations -------- @@ -187,12 +336,23 @@ contract DeployOPChain is Script { SystemConfig: address(_o.systemConfigProxy), L1ERC721Bridge: address(_o.l1ERC721BridgeProxy), ProtocolVersions: address(0), - SuperchainConfig: address(0) + SuperchainConfig: address(_i.superchainConfig) }); - // Check dispute games - // With v2 game contracts enabled, we use the predeployed pdg implementation - address expectedPDGImpl = IOPContractsManager(_i.opcm).implementations().permissionedDisputeGameV2Impl; + // Check dispute games and get superchain config + address expectedPDGImpl = 
address(_o.permissionedDisputeGame); + + if (isDevFeatureOpcmV2Enabled(_i.opcm)) { + // OPCM v2: use implementations from v2 contract + IOPContractsManagerV2 opcmV2 = IOPContractsManagerV2(_i.opcm); + expectedPDGImpl = opcmV2.implementations().permissionedDisputeGameV2Impl; + } else { + // OPCM v1: use implementations from v1 contract + IOPContractsManager opcm = IOPContractsManager(_i.opcm); + // With v2 game contracts enabled, we use the predeployed pdg implementation + expectedPDGImpl = opcm.implementations().permissionedDisputeGameV2Impl; + } + ChainAssertions.checkDisputeGameFactory( _o.disputeGameFactoryProxy, _i.opChainProxyAdminOwner, expectedPDGImpl, true ); @@ -201,7 +361,7 @@ contract DeployOPChain is Script { ChainAssertions.checkL1CrossDomainMessenger(_o.l1CrossDomainMessengerProxy, vm, true); ChainAssertions.checkOptimismPortal2({ _contracts: proxies, - _superchainConfig: IOPContractsManager(_i.opcm).superchainConfig(), + _superchainConfig: _i.superchainConfig, _opChainProxyAdminOwner: _i.opChainProxyAdminOwner, _isProxy: true }); diff --git a/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol b/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol index 23deddc11e7d6..3fb08277fca91 100644 --- a/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol @@ -5,8 +5,11 @@ import { IProxy } from "interfaces/universal/IProxy.sol"; import { Script } from "forge-std/Script.sol"; import { IMIPS64 } from "interfaces/cannon/IMIPS64.sol"; import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; +import { IOPContractsManagerV2 } from "interfaces/L1/opcm/IOPContractsManagerV2.sol"; +import { IOPContractsManagerContainer } from "interfaces/L1/opcm/IOPContractsManagerContainer.sol"; import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; import { IStaticL1ChugSplashProxy } from 
"interfaces/legacy/IL1ChugSplashProxy.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; contract ReadImplementationAddresses is Script { struct Input { @@ -59,24 +62,53 @@ contract ReadImplementationAddresses is Script { vm.prank(address(0)); output_.l1StandardBridge = IStaticL1ChugSplashProxy(_input.l1StandardBridgeProxy).getImplementation(); - // Get implementations from OPCM - IOPContractsManager opcm = IOPContractsManager(_input.opcm); - output_.opcmGameTypeAdder = address(opcm.opcmGameTypeAdder()); - output_.opcmDeployer = address(opcm.opcmDeployer()); - output_.opcmUpgrader = address(opcm.opcmUpgrader()); - output_.opcmInteropMigrator = address(opcm.opcmInteropMigrator()); - output_.opcmStandardValidator = address(opcm.opcmStandardValidator()); + // Check if OPCM v2 is being used + bool useV2 = IOPContractsManager(_input.opcm).isDevFeatureEnabled(DevFeatures.OPCM_V2); - IOPContractsManager.Implementations memory impls = opcm.implementations(); - output_.mipsSingleton = impls.mipsImpl; - output_.delayedWETH = impls.delayedWETHImpl; - output_.ethLockbox = impls.ethLockboxImpl; - output_.anchorStateRegistry = impls.anchorStateRegistryImpl; - output_.optimismPortalInterop = impls.optimismPortalInteropImpl; - output_.faultDisputeGameV2 = impls.faultDisputeGameV2Impl; - output_.permissionedDisputeGameV2 = impls.permissionedDisputeGameV2Impl; - output_.superFaultDisputeGame = impls.superFaultDisputeGameImpl; - output_.superPermissionedDisputeGame = impls.superPermissionedDisputeGameImpl; + if (useV2) { + // Get implementations from OPCM V2 + IOPContractsManagerV2 opcmV2 = IOPContractsManagerV2(_input.opcm); + + // OPCMV2 doesn't expose these addresses directly, so we set them to zero + // These are internal to the OPCM container and not meant to be accessed externally + output_.opcmGameTypeAdder = address(0); + output_.opcmDeployer = address(0); + output_.opcmUpgrader = address(0); + output_.opcmInteropMigrator = address(0); + + // 
StandardValidator is accessible via the standardValidator() method + output_.opcmStandardValidator = address(opcmV2.opcmStandardValidator()); + + IOPContractsManagerContainer.Implementations memory impls = opcmV2.implementations(); + output_.mipsSingleton = impls.mipsImpl; + output_.delayedWETH = impls.delayedWETHImpl; + output_.ethLockbox = impls.ethLockboxImpl; + output_.anchorStateRegistry = impls.anchorStateRegistryImpl; + output_.optimismPortalInterop = impls.optimismPortalInteropImpl; + output_.faultDisputeGameV2 = impls.faultDisputeGameV2Impl; + output_.permissionedDisputeGameV2 = impls.permissionedDisputeGameV2Impl; + output_.superFaultDisputeGame = impls.superFaultDisputeGameImpl; + output_.superPermissionedDisputeGame = impls.superPermissionedDisputeGameImpl; + } else { + // Get implementations from OPCM V1 + IOPContractsManager opcm = IOPContractsManager(_input.opcm); + output_.opcmGameTypeAdder = address(opcm.opcmGameTypeAdder()); + output_.opcmDeployer = address(opcm.opcmDeployer()); + output_.opcmUpgrader = address(opcm.opcmUpgrader()); + output_.opcmInteropMigrator = address(opcm.opcmInteropMigrator()); + output_.opcmStandardValidator = address(opcm.opcmStandardValidator()); + + IOPContractsManager.Implementations memory impls = opcm.implementations(); + output_.mipsSingleton = impls.mipsImpl; + output_.delayedWETH = impls.delayedWETHImpl; + output_.ethLockbox = impls.ethLockboxImpl; + output_.anchorStateRegistry = impls.anchorStateRegistryImpl; + output_.optimismPortalInterop = impls.optimismPortalInteropImpl; + output_.faultDisputeGameV2 = impls.faultDisputeGameV2Impl; + output_.permissionedDisputeGameV2 = impls.permissionedDisputeGameV2Impl; + output_.superFaultDisputeGame = impls.superFaultDisputeGameImpl; + output_.superPermissionedDisputeGame = impls.superPermissionedDisputeGameImpl; + } // Get L1CrossDomainMessenger from AddressManager IAddressManager am = IAddressManager(_input.addressManager); diff --git 
a/packages/contracts-bedrock/scripts/deploy/UpgradeOPChain.s.sol b/packages/contracts-bedrock/scripts/deploy/UpgradeOPChain.s.sol index d238b55edc23c..d180f80ffdc23 100644 --- a/packages/contracts-bedrock/scripts/deploy/UpgradeOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/UpgradeOPChain.s.sol @@ -3,26 +3,53 @@ pragma solidity ^0.8.0; import { Script } from "forge-std/Script.sol"; import { OPContractsManager } from "src/L1/OPContractsManager.sol"; +import { OPContractsManagerV2 } from "src/L1/opcm/OPContractsManagerV2.sol"; import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; contract UpgradeOPChainInput is BaseDeployIO { address internal _prank; - OPContractsManager internal _opcm; - bytes _opChainConfigs; + address internal _opcm; + /// @notice The upgrade input is stored as opaque bytes to allow storing both OPCM v1 and v2 upgrade inputs. + bytes _upgradeInput; // Setter for OPContractsManager type function set(bytes4 _sel, address _value) public { require(address(_value) != address(0), "UpgradeOPCMInput: cannot set zero address"); if (_sel == this.prank.selector) _prank = _value; - else if (_sel == this.opcm.selector) _opcm = OPContractsManager(_value); + else if (_sel == this.opcm.selector) _opcm = _value; else revert("UpgradeOPCMInput: unknown selector"); } + /// @notice Sets the upgrade input using the OPContractsManager.OpChainConfig[] type, + /// this is used when upgrading chains using OPCM v1. + /// @param _sel The selector of the field to set. + /// @param _value The value to set. 
function set(bytes4 _sel, OPContractsManager.OpChainConfig[] memory _value) public { + if (OPContractsManager(opcm()).isDevFeatureEnabled(DevFeatures.OPCM_V2)) { + revert("UpgradeOPCMInput: cannot set OPCM v1 upgrade input when OPCM v2 is enabled"); + } require(_value.length > 0, "UpgradeOPCMInput: cannot set empty array"); - if (_sel == this.opChainConfigs.selector) _opChainConfigs = abi.encode(_value); + if (_sel == this.upgradeInput.selector) _upgradeInput = abi.encode(_value); + else revert("UpgradeOPCMInput: unknown selector"); + } + + /// @notice Sets the upgrade input using the OPContractsManagerV2.UpgradeInput type, + /// this is used when upgrading chains using OPCM v2. + /// Minimal validation is performed, relying on the OPCM v2 contract to perform the proper validation. + /// This is done to avoid duplicating the validation logic in the script. + /// @param _sel The selector of the field to set. + /// @param _value The value to set. + function set(bytes4 _sel, OPContractsManagerV2.UpgradeInput memory _value) public { + if (!OPContractsManager(opcm()).isDevFeatureEnabled(DevFeatures.OPCM_V2)) { + revert("UpgradeOPCMInput: cannot set OPCM v2 upgrade input when OPCM v1 is enabled"); + } + require(address(_value.systemConfig) != address(0), "UpgradeOPCMInput: cannot set zero address"); + require(_value.disputeGameConfigs.length > 0, "UpgradeOPCMInput: cannot set empty dispute game configs array"); + + if (_sel == this.upgradeInput.selector) _upgradeInput = abi.encode(_value); else revert("UpgradeOPCMInput: unknown selector"); } @@ -31,45 +58,91 @@ contract UpgradeOPChainInput is BaseDeployIO { return _prank; } - function opcm() public view returns (OPContractsManager) { - require(address(_opcm) != address(0), "UpgradeOPCMInput: not set"); + function opcm() public view returns (address) { + require(_opcm != address(0), "UpgradeOPCMInput: not set"); return _opcm; } - function opChainConfigs() public view returns (bytes memory) { - 
require(_opChainConfigs.length > 0, "UpgradeOPCMInput: not set"); - return _opChainConfigs; + function upgradeInput() public view returns (bytes memory) { + require(_upgradeInput.length > 0, "UpgradeOPCMInput: not set"); + return _upgradeInput; } } contract UpgradeOPChain is Script { function run(UpgradeOPChainInput _uoci) external { - OPContractsManager opcm = _uoci.opcm(); - OPContractsManager.OpChainConfig[] memory opChainConfigs = - abi.decode(_uoci.opChainConfigs(), (OPContractsManager.OpChainConfig[])); + address opcm = _uoci.opcm(); + + // First, we need to check what version of OPCM is being used. + bool useOPCMv2 = OPContractsManager(opcm).isDevFeatureEnabled(DevFeatures.OPCM_V2); // Etch DummyCaller contract. This contract is used to mimic the contract that is used // as the source of the delegatecall to the OPCM. In practice this will be the governance // 2/2 or similar. address prank = _uoci.prank(); - bytes memory code = vm.getDeployedCode("UpgradeOPChain.s.sol:DummyCaller"); + bytes memory code = _getDummyCallerCode(useOPCMv2); vm.etch(prank, code); vm.store(prank, bytes32(0), bytes32(uint256(uint160(address(opcm))))); vm.label(prank, "DummyCaller"); // Call into the DummyCaller. This will perform the delegatecall under the hood and // return the result. - vm.broadcast(msg.sender); - (bool success,) = DummyCaller(prank).upgrade(opChainConfigs); + (bool success,) = _upgrade(prank, useOPCMv2, _uoci.upgradeInput()); require(success, "UpgradeChain: upgrade failed"); } + + /// @notice Helper function to get the proper dummy caller code based on the OPCM version. + /// @param _useOPCMv2 Whether to use OPCM v2. + /// @return code The code of the dummy caller. 
+ function _getDummyCallerCode(bool _useOPCMv2) internal view returns (bytes memory) { + if (_useOPCMv2) return vm.getDeployedCode("UpgradeOPChain.s.sol:DummyCallerV2"); + else return vm.getDeployedCode("UpgradeOPChain.s.sol:DummyCallerV1"); + } + + /// @notice Helper function to upgrade the OPCM based on the OPCM version. Performs the decoding of the upgrade + /// input and the delegatecall to the OPCM. + /// @param _prank The address of the dummy caller contract. + /// @param _useOPCMv2 Whether to use OPCM v2. + /// @param _upgradeInput The upgrade input. + /// @return success Whether the upgrade succeeded. + /// @return result The result of the upgrade (bool, bytes memory). + function _upgrade( + address _prank, + bool _useOPCMv2, + bytes memory _upgradeInput + ) + internal + returns (bool, bytes memory) + { + vm.broadcast(msg.sender); + if (_useOPCMv2) { + return DummyCallerV2(_prank).upgrade(abi.decode(_upgradeInput, (OPContractsManagerV2.UpgradeInput))); + } else { + return DummyCallerV1(_prank).upgrade(abi.decode(_upgradeInput, (OPContractsManager.OpChainConfig[]))); + } + } +} +/// @title DummyCallerV2 +/// @notice This contract is used to mimic the contract that is used as the source of the delegatecall to the OPCM v2. +/// Uses OPContractsManagerV2.UpgradeInput type for the upgrade input. + +contract DummyCallerV2 { + address internal _opcmAddr; + + function upgrade(OPContractsManagerV2.UpgradeInput memory _upgradeInput) external returns (bool, bytes memory) { + bytes memory data = abi.encodeCall(OPContractsManagerV2.upgrade, _upgradeInput); + (bool success, bytes memory result) = _opcmAddr.delegatecall(data); + return (success, result); + } } +/// @notice This contract is used to mimic the contract that is used as the source of the delegatecall to the OPCM v1. +/// Uses OPContractsManager.OpChainConfig[] type for the upgrade input. 
-contract DummyCaller { +contract DummyCallerV1 { address internal _opcmAddr; function upgrade(OPContractsManager.OpChainConfig[] memory _opChainConfigs) external returns (bool, bytes memory) { - bytes memory data = abi.encodeCall(DummyCaller.upgrade, _opChainConfigs); + bytes memory data = abi.encodeCall(OPContractsManager.upgrade, _opChainConfigs); (bool success, bytes memory result) = _opcmAddr.delegatecall(data); return (success, result); } diff --git a/packages/contracts-bedrock/scripts/deploy/UpgradeSuperchainConfig.s.sol b/packages/contracts-bedrock/scripts/deploy/UpgradeSuperchainConfig.s.sol index 0f4d110f81174..8cc0cb13123d2 100644 --- a/packages/contracts-bedrock/scripts/deploy/UpgradeSuperchainConfig.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/UpgradeSuperchainConfig.s.sol @@ -3,13 +3,17 @@ pragma solidity 0.8.15; import { Script } from "forge-std/Script.sol"; import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; +import { IOPContractsManagerV2 } from "interfaces/L1/opcm/IOPContractsManagerV2.sol"; +import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; contract UpgradeSuperchainConfig is Script { struct Input { address prank; - IOPContractsManager opcm; + address opcm; ISuperchainConfig superchainConfig; + IOPContractsManagerUtils.ExtraInstruction[] extraInstructions; } /// @notice Delegate calls upgradeSuperchainConfig on the OPCM from the input.prank address. @@ -17,34 +21,64 @@ contract UpgradeSuperchainConfig is Script { // Make sure the input is valid assertValidInput(_input); - IOPContractsManager opcm = _input.opcm; + // Both OPCM v1 and v2 implement the isDevFeatureEnabled function. + bool useOPCMv2 = IOPContractsManager(_input.opcm).isDevFeatureEnabled(DevFeatures.OPCM_V2); + + address opcm = _input.opcm; // Etch DummyCaller contract. 
This contract is used to mimic the contract that is used // as the source of the delegatecall to the OPCM. In practice this will be the governance // 2/2 or similar. address prank = _input.prank; - bytes memory code = vm.getDeployedCode("UpgradeSuperchainConfig.s.sol:DummyCaller"); + bytes memory code = _getDummyCallerCode(useOPCMv2); vm.etch(prank, code); - vm.store(prank, bytes32(0), bytes32(uint256(uint160(address(opcm))))); + vm.store(prank, bytes32(0), bytes32(uint256(uint160(opcm)))); vm.label(prank, "DummyCaller"); - ISuperchainConfig superchainConfig = _input.superchainConfig; - - // Call into the DummyCaller to perform the delegatecall - vm.broadcast(msg.sender); - - (bool success,) = DummyCaller(prank).upgradeSuperchainConfig(superchainConfig); + (bool success,) = _upgrade(prank, useOPCMv2, _input); require(success, "UpgradeSuperchainConfig: upgradeSuperchainConfig failed"); } /// @notice Asserts that the input is valid. function assertValidInput(Input memory _input) internal pure { + // Note: Intentionally not checking extra instructions for OPCM v2 as they are not required in some upgrades. + // This responsibility is delegated to the OPCM v2 contract. require(_input.prank != address(0), "UpgradeSuperchainConfig: prank not set"); require(address(_input.opcm) != address(0), "UpgradeSuperchainConfig: opcm not set"); require(address(_input.superchainConfig) != address(0), "UpgradeSuperchainConfig: superchainConfig not set"); } + + /// @notice Helper function to get the proper dummy caller code based on the OPCM version. + /// @param _useOPCMv2 Whether to use OPCM v2. + /// @return code The code of the dummy caller. + function _getDummyCallerCode(bool _useOPCMv2) internal view returns (bytes memory) { + if (_useOPCMv2) return vm.getDeployedCode("UpgradeSuperchainConfig.s.sol:DummyCallerV2"); + else return vm.getDeployedCode("UpgradeSuperchainConfig.s.sol:DummyCaller"); + } + + /// @notice Helper function to upgrade the OPCM based on the OPCM version. 
Performs the decoding of the upgrade + /// input and the delegatecall to the OPCM. + /// @param _prank The address of the dummy caller contract. + /// @param _useOPCMv2 Whether to use OPCM v2. + /// @param _input The input. + /// @return success Whether the upgrade succeeded. + /// @return result The result of the upgrade (bool, bytes memory). + function _upgrade(address _prank, bool _useOPCMv2, Input memory _input) internal returns (bool, bytes memory) { + // Call into the DummyCaller to perform the delegatecall + vm.broadcast(msg.sender); + if (_useOPCMv2) { + return DummyCallerV2(_prank).upgradeSuperchain( + IOPContractsManagerV2.SuperchainUpgradeInput({ + superchainConfig: _input.superchainConfig, + extraInstructions: _input.extraInstructions + }) + ); + } else { + return DummyCaller(_prank).upgradeSuperchainConfig(_input.superchainConfig); + } + } } /// @title DummyCaller @@ -58,3 +92,19 @@ contract DummyCaller { return (success, result); } } + +/// @title DummyCallerV2 +/// @notice This contract is used to mimic the contract that is used as the source of the delegatecall to the OPCM v2. +/// Uses IOPContractsManagerV2.SuperchainUpgradeInput type for the upgrade input. 
+contract DummyCallerV2 { + address internal _opcmAddr; + + function upgradeSuperchain(IOPContractsManagerV2.SuperchainUpgradeInput memory _superchainUpgradeInput) + external + returns (bool, bytes memory) + { + bytes memory data = abi.encodeCall(IOPContractsManagerV2.upgradeSuperchain, (_superchainUpgradeInput)); + (bool success, bytes memory result) = _opcmAddr.delegatecall(data); + return (success, result); + } +} diff --git a/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol b/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol index 11c209ee151b3..989ef73c9d3e7 100644 --- a/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol @@ -45,6 +45,9 @@ contract VerifyOPCM is Script { /// @notice Thrown when contractsContainer addresses are not the same across all OPCM components. error VerifyOPCM_ContractsContainerMismatch(); + /// @notice Thrown when opcmUtils addresses are not the same across all OPCM components that have it. + error VerifyOPCM_OpcmUtilsMismatch(); + /// @notice Thrown when the creation bytecode is not found in an artifact file. 
error VerifyOPCM_CreationBytecodeNotFound(string _artifactPath); @@ -127,6 +130,7 @@ contract VerifyOPCM is Script { fieldNameOverrides["superPermissionedDisputeGame2"] = "SuperPermissionedDisputeGame"; fieldNameOverrides["opcmGameTypeAdder"] = "OPContractsManagerGameTypeAdder"; fieldNameOverrides["opcmDeployer"] = "OPContractsManagerDeployer"; + fieldNameOverrides["opcmMigrator"] = "OPContractsManagerMigrator"; fieldNameOverrides["opcmUpgrader"] = "OPContractsManagerUpgrader"; fieldNameOverrides["opcmInteropMigrator"] = "OPContractsManagerInteropMigrator"; fieldNameOverrides["opcmStandardValidator"] = "OPContractsManagerStandardValidator"; @@ -166,6 +170,7 @@ contract VerifyOPCM is Script { expectedGetters["opcmDeployer"] = "SKIP"; // Address verified via bytecode comparison expectedGetters["opcmGameTypeAdder"] = "SKIP"; // Address verified via bytecode comparison expectedGetters["opcmInteropMigrator"] = "SKIP"; // Address verified via bytecode comparison + expectedGetters["opcmMigrator"] = "SKIP"; // Address verified via bytecode comparison expectedGetters["opcmStandardValidator"] = "SKIP"; // Address verified via bytecode comparison expectedGetters["opcmUpgrader"] = "SKIP"; // Address verified via bytecode comparison @@ -265,6 +270,9 @@ contract VerifyOPCM is Script { // Verify that all component contracts have the same contractsContainer address. _verifyContractsContainerConsistency(propRefs); + // Verify that all component contracts that have opcmUtils() have the same address. + _verifyOpcmUtilsConsistency(propRefs); + // Get the ContractsContainer address from the first component (they're all the same) address contractsContainerAddr = address(0); for (uint256 i = 0; i < propRefs.length; i++) { @@ -407,6 +415,81 @@ contract VerifyOPCM is Script { return abi.decode(returnData, (address)); } + /// @notice Checks if a field name represents an OPCM component contract that has opcmUtils(). + /// @param _field The field name to check. 
+ /// @return True if the field represents an OPCM component with opcmUtils(), false otherwise. + function _hasOpcmUtils(string memory _field) internal pure returns (bool) { + // Only opcmV2 and opcmMigrator have opcmUtils() via OPContractsManagerUtilsCaller + return LibString.eq(_field, "opcmV2") || LibString.eq(_field, "opcmMigrator"); + } + + /// @notice Gets the opcmUtils address from a contract. + /// @param _contract The contract address to query. + /// @return The opcmUtils address. + function _getOpcmUtilsAddress(address _contract) internal view returns (address) { + // Call the opcmUtils() function on the contract. + // nosemgrep: sol-style-use-abi-encodecall + (bool success, bytes memory returnData) = _contract.staticcall(abi.encodeWithSignature("opcmUtils()")); + if (!success) { + console.log( + string.concat("[FAIL] ERROR: Failed to call opcmUtils() function on contract ", vm.toString(_contract)) + ); + return address(0); + } + return abi.decode(returnData, (address)); + } + + /// @notice Verifies that all OPCM component contracts that have opcmUtils() have the same address. + /// @param _propRefs Array of property references containing component addresses. 
+ function _verifyOpcmUtilsConsistency(OpcmContractRef[] memory _propRefs) internal view { + // Process components that have opcmUtils(), validate addresses, and verify consistency + OpcmContractRef[] memory components = new OpcmContractRef[](_propRefs.length); + address[] memory utilsAddresses = new address[](_propRefs.length); + uint256 componentCount = 0; + address expectedUtils = address(0); + + for (uint256 i = 0; i < _propRefs.length; i++) { + OpcmContractRef memory propRef = _propRefs[i]; + + if (!_hasOpcmUtils(propRef.field)) { + continue; + } + + components[componentCount] = propRef; + address utilsAddr = _getOpcmUtilsAddress(propRef.addr); + + if (utilsAddr == address(0)) { + console.log(string.concat("ERROR: Failed to retrieve opcmUtils address from ", propRef.field)); + revert VerifyOPCM_OpcmUtilsMismatch(); + } + + utilsAddresses[componentCount] = utilsAddr; + + if (componentCount == 0) { + expectedUtils = utilsAddr; + } else if (utilsAddr != expectedUtils) { + console.log("ERROR: opcmUtils addresses are not consistent across all components"); + for (uint256 j = 0; j <= componentCount; j++) { + console.log(string.concat(" ", components[j].field, ": ", vm.toString(utilsAddresses[j]))); + } + revert VerifyOPCM_OpcmUtilsMismatch(); + } + + componentCount++; + } + + // Ensure we found at least one component + if (componentCount == 0) { + console.log("OK: No OPCM components with opcmUtils() found (skipping verification)"); + return; + } + + console.log( + string.concat("OK: All ", vm.toString(componentCount), " components have the same opcmUtils address") + ); + console.log(string.concat(" opcmUtils: ", vm.toString(expectedUtils))); + } + /// @notice Verifies a single OPCM contract reference (implementation or bytecode). /// @param _opcm The OPCM contract that contains the target contract reference. /// @param _target The target contract reference to verify. 
diff --git a/packages/contracts-bedrock/scripts/libraries/Types.sol b/packages/contracts-bedrock/scripts/libraries/Types.sol index 7b90b7204e9a3..bec2f022e9120 100644 --- a/packages/contracts-bedrock/scripts/libraries/Types.sol +++ b/packages/contracts-bedrock/scripts/libraries/Types.sol @@ -2,6 +2,7 @@ pragma solidity ^0.8.0; import { Claim, Duration, GameType } from "src/dispute/lib/Types.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; library Types { /// @notice Represents a set of L1 contracts. Used to represent a set of proxies. @@ -49,6 +50,8 @@ library Types { // Fee params uint32 operatorFeeScalar; uint64 operatorFeeConstant; + // Superchain contracts + ISuperchainConfig superchainConfig; // Whether to use the custom gas token. bool useCustomGasToken; } diff --git a/packages/contracts-bedrock/snapshots/abi/AccessManager.json b/packages/contracts-bedrock/snapshots/abi/AccessManager.json new file mode 100644 index 0000000000000..adcdb13fd5249 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/AccessManager.json @@ -0,0 +1,285 @@ +[ + { + "inputs": [ + { + "internalType": "uint256", + "name": "_fallbackTimeout", + "type": "uint256" + }, + { + "internalType": "contract IDisputeGameFactory", + "name": "_disputeGameFactory", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "DEPLOYMENT_TIMESTAMP", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "DISPUTE_GAME_FACTORY", + "outputs": [ + { + "internalType": "contract IDisputeGameFactory", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "FALLBACK_TIMEOUT", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, 
+ { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "challengers", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "getLastProposalTimestamp", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_challenger", + "type": "address" + } + ], + "name": "isAllowedChallenger", + "outputs": [ + { + "internalType": "bool", + "name": "allowed_", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_proposer", + "type": "address" + } + ], + "name": "isAllowedProposer", + "outputs": [ + { + "internalType": "bool", + "name": "allowed_", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "isProposalPermissionlessMode", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "proposers", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "renounceOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_challenger", + "type": "address" + }, + { + "internalType": "bool", + "name": "_allowed", + "type": "bool" + } + ], + "name": 
"setChallenger", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_proposer", + "type": "address" + }, + { + "internalType": "bool", + "name": "_allowed", + "type": "bool" + } + ], + "name": "setProposer", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "transferOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "challenger", + "type": "address" + }, + { + "indexed": false, + "internalType": "bool", + "name": "allowed", + "type": "bool" + } + ], + "name": "ChallengerPermissionUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "OwnershipTransferred", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "proposer", + "type": "address" + }, + { + "indexed": false, + "internalType": "bool", + "name": "allowed", + "type": "bool" + } + ], + "name": "ProposerPermissionUpdated", + "type": "event" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json index 26a1351d53d79..57107a1de6fbd 100644 --- a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json @@ -781,6 +781,25 @@ "stateMutability": "pure", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_chainId", + "type": 
"uint256" + } + ], + "name": "rootClaimByChainId", + "outputs": [ + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "splitDepth", @@ -1212,6 +1231,11 @@ "name": "UnexpectedString", "type": "error" }, + { + "inputs": [], + "name": "UnknownChainId", + "type": "error" + }, { "inputs": [], "name": "ValidStep", diff --git a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGameV2.json b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGameV2.json index c3c9a6555f2a8..92cc0f80814e5 100644 --- a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGameV2.json +++ b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGameV2.json @@ -751,6 +751,25 @@ "stateMutability": "pure", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_chainId", + "type": "uint256" + } + ], + "name": "rootClaimByChainId", + "outputs": [ + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, { "inputs": [], "name": "splitDepth", @@ -1177,6 +1196,11 @@ "name": "UnexpectedString", "type": "error" }, + { + "inputs": [], + "name": "UnknownChainId", + "type": "error" + }, { "inputs": [], "name": "ValidStep", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index b5871765022c4..07ce75d6ff864 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -1104,6 +1104,11 @@ "name": "LatestReleaseNotSet", "type": "error" }, + { + "inputs": [], + "name": "OPContractsManager_V2Enabled", + "type": "error" + }, { "inputs": [], "name": "OnlyDelegatecall", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerMigrator.json 
b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerMigrator.json new file mode 100644 index 0000000000000..07af1688b84e0 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerMigrator.json @@ -0,0 +1,128 @@ +[ + { + "inputs": [ + { + "internalType": "contract IOPContractsManagerContainer", + "name": "_contractsContainer", + "type": "address" + }, + { + "internalType": "contract IOPContractsManagerUtils", + "name": "_utils", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "contractsContainer", + "outputs": [ + { + "internalType": "contract IOPContractsManagerContainer", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "contract ISystemConfig[]", + "name": "chainSystemConfigs", + "type": "address[]" + }, + { + "components": [ + { + "internalType": "bool", + "name": "enabled", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "initBond", + "type": "uint256" + }, + { + "internalType": "GameType", + "name": "gameType", + "type": "uint32" + }, + { + "internalType": "bytes", + "name": "gameArgs", + "type": "bytes" + } + ], + "internalType": "struct IOPContractsManagerUtils.DisputeGameConfig[]", + "name": "disputeGameConfigs", + "type": "tuple[]" + }, + { + "components": [ + { + "internalType": "Hash", + "name": "root", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "l2SequenceNumber", + "type": "uint256" + } + ], + "internalType": "struct Proposal", + "name": "startingAnchorRoot", + "type": "tuple" + }, + { + "internalType": "GameType", + "name": "startingRespectedGameType", + "type": "uint32" + } + ], + "internalType": "struct OPContractsManagerMigrator.MigrateInput", + "name": "_input", + "type": "tuple" + } + ], + "name": "migrate", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + 
"inputs": [], + "name": "opcmUtils", + "outputs": [ + { + "internalType": "contract IOPContractsManagerUtils", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "OPContractsManagerMigrator_InvalidStartingRespectedGameType", + "type": "error" + }, + { + "inputs": [], + "name": "OPContractsManagerMigrator_ProxyAdminOwnerMismatch", + "type": "error" + }, + { + "inputs": [], + "name": "OPContractsManagerMigrator_SuperchainConfigMismatch", + "type": "error" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUtils.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUtils.json index 50668d05a9bf0..5f234403fa085 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUtils.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUtils.json @@ -111,6 +111,25 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "GameType", + "name": "_gameType", + "type": "uint32" + } + ], + "name": "getGameImpl", + "outputs": [ + { + "internalType": "contract IDisputeGame", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -506,6 +525,62 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_l2ChainId", + "type": "uint256" + }, + { + "internalType": "contract IAnchorStateRegistry", + "name": "_anchorStateRegistry", + "type": "address" + }, + { + "internalType": "contract IDelayedWETH", + "name": "_delayedWETH", + "type": "address" + }, + { + "components": [ + { + "internalType": "bool", + "name": "enabled", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "initBond", + "type": "uint256" + }, + { + "internalType": "GameType", + "name": "gameType", + "type": "uint32" + }, + { + "internalType": "bytes", + "name": "gameArgs", + "type": 
"bytes" + } + ], + "internalType": "struct IOPContractsManagerUtils.DisputeGameConfig", + "name": "_gcfg", + "type": "tuple" + } + ], + "name": "makeGameArgs", + "outputs": [ + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -621,6 +696,11 @@ "name": "OPContractsManagerUtils_ProxyMustLoad", "type": "error" }, + { + "inputs": [], + "name": "OPContractsManagerUtils_UnsupportedGameType", + "type": "error" + }, { "inputs": [], "name": "ReservedBitsSet", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerV2.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerV2.json index 17555aea0617a..f2d5fae6137f2 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerV2.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerV2.json @@ -11,6 +11,11 @@ "name": "_standardValidator", "type": "address" }, + { + "internalType": "contract IOPContractsManagerMigrator", + "name": "_migrator", + "type": "address" + }, { "internalType": "contract IOPContractsManagerUtils", "name": "_utils", @@ -209,7 +214,7 @@ "type": "bytes" } ], - "internalType": "struct OPContractsManagerV2.DisputeGameConfig[]", + "internalType": "struct IOPContractsManagerUtils.DisputeGameConfig[]", "name": "disputeGameConfigs", "type": "tuple[]" }, @@ -458,6 +463,88 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "components": [ + { + "internalType": "contract ISystemConfig[]", + "name": "chainSystemConfigs", + "type": "address[]" + }, + { + "components": [ + { + "internalType": "bool", + "name": "enabled", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "initBond", + "type": "uint256" + }, + { + "internalType": "GameType", + "name": "gameType", + "type": "uint32" + }, + { + "internalType": "bytes", + "name": "gameArgs", + "type": "bytes" + } + ], + "internalType": "struct 
IOPContractsManagerUtils.DisputeGameConfig[]", + "name": "disputeGameConfigs", + "type": "tuple[]" + }, + { + "components": [ + { + "internalType": "Hash", + "name": "root", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "l2SequenceNumber", + "type": "uint256" + } + ], + "internalType": "struct Proposal", + "name": "startingAnchorRoot", + "type": "tuple" + }, + { + "internalType": "GameType", + "name": "startingRespectedGameType", + "type": "uint32" + } + ], + "internalType": "struct IOPContractsManagerMigrator.MigrateInput", + "name": "_input", + "type": "tuple" + } + ], + "name": "migrate", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "opcmMigrator", + "outputs": [ + { + "internalType": "contract IOPContractsManagerMigrator", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "opcmStandardValidator", @@ -529,7 +616,7 @@ "type": "bytes" } ], - "internalType": "struct OPContractsManagerV2.DisputeGameConfig[]", + "internalType": "struct IOPContractsManagerUtils.DisputeGameConfig[]", "name": "disputeGameConfigs", "type": "tuple[]" }, @@ -764,11 +851,6 @@ "name": "OPContractsManagerV2_SuperchainConfigNeedsUpgrade", "type": "error" }, - { - "inputs": [], - "name": "OPContractsManagerV2_UnsupportedGameType", - "type": "error" - }, { "inputs": [], "name": "ReservedBitsSet", diff --git a/packages/contracts-bedrock/snapshots/abi/OPSuccinctFaultDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/OptimisticZkGame.json similarity index 96% rename from packages/contracts-bedrock/snapshots/abi/OPSuccinctFaultDisputeGame.json rename to packages/contracts-bedrock/snapshots/abi/OptimisticZkGame.json index 9e60e00347a4d..5dc919132786b 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPSuccinctFaultDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/OptimisticZkGame.json @@ -112,7 +112,7 @@ "name": 
"challenge", "outputs": [ { - "internalType": "enum OPSuccinctFaultDisputeGame.ProposalStatus", + "internalType": "enum OptimisticZkGame.ProposalStatus", "name": "", "type": "uint8" } @@ -171,7 +171,7 @@ "type": "bytes32" }, { - "internalType": "enum OPSuccinctFaultDisputeGame.ProposalStatus", + "internalType": "enum OptimisticZkGame.ProposalStatus", "name": "status", "type": "uint8" }, @@ -413,7 +413,7 @@ "name": "prove", "outputs": [ { - "internalType": "enum OPSuccinctFaultDisputeGame.ProposalStatus", + "internalType": "enum OptimisticZkGame.ProposalStatus", "name": "", "type": "uint8" } @@ -505,6 +505,25 @@ "stateMutability": "pure", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "rootClaimByChainId", + "outputs": [ + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, { "inputs": [], "name": "sp1Verifier", diff --git a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json index 8bb88f4663986..f9718eac61dcd 100644 --- a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json @@ -817,6 +817,25 @@ "stateMutability": "pure", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_chainId", + "type": "uint256" + } + ], + "name": "rootClaimByChainId", + "outputs": [ + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "splitDepth", @@ -1253,6 +1272,11 @@ "name": "UnexpectedString", "type": "error" }, + { + "inputs": [], + "name": "UnknownChainId", + "type": "error" + }, { "inputs": [], "name": "ValidStep", diff --git a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGameV2.json 
b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGameV2.json index 09352c2b75827..fb9900ca2d400 100644 --- a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGameV2.json +++ b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGameV2.json @@ -777,6 +777,25 @@ "stateMutability": "pure", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_chainId", + "type": "uint256" + } + ], + "name": "rootClaimByChainId", + "outputs": [ + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, { "inputs": [], "name": "splitDepth", @@ -1208,6 +1227,11 @@ "name": "UnexpectedString", "type": "error" }, + { + "inputs": [], + "name": "UnknownChainId", + "type": "error" + }, { "inputs": [], "name": "ValidStep", diff --git a/packages/contracts-bedrock/snapshots/abi/RISCV.json b/packages/contracts-bedrock/snapshots/abi/RISCV.json deleted file mode 100644 index 1650fd3980ec9..0000000000000 --- a/packages/contracts-bedrock/snapshots/abi/RISCV.json +++ /dev/null @@ -1,68 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "contract IPreimageOracle", - "name": "_oracle", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "oracle", - "outputs": [ - { - "internalType": "contract IPreimageOracle", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "_stateData", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "_proof", - "type": "bytes" - }, - { - "internalType": "bytes32", - "name": "_localContext", - "type": "bytes32" - } - ], - "name": "step", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - 
"internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi_loader.go b/packages/contracts-bedrock/snapshots/abi_loader.go index 8a69f9321a9de..83119016ae9d6 100644 --- a/packages/contracts-bedrock/snapshots/abi_loader.go +++ b/packages/contracts-bedrock/snapshots/abi_loader.go @@ -16,7 +16,7 @@ var superFaultDisputeGame []byte //go:embed abi/FaultDisputeGame.json var faultDisputeGame []byte -//go:embed abi/OPSuccinctFaultDisputeGame.json +//go:embed abi/OptimisticZkGame.json var zkDisputeGame []byte //go:embed abi/PreimageOracle.json diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index eb0bc8f4b6198..3a98d960daf4d 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -24,8 +24,8 @@ "sourceCodeHash": "0xfca613b5d055ffc4c3cbccb0773ddb9030abedc1aa6508c9e2e7727cc0cd617b" }, "src/L1/OPContractsManager.sol:OPContractsManager": { - "initCodeHash": "0x51bb4fa1d01503ec16e8611ac1e2f042ea51280310f1cca2a15a4826acfc2db5", - "sourceCodeHash": "0xacc5a0e75797686ad9545dcae82c89b2ca847ba42988eb63466ef03f4e1c739e" + "initCodeHash": "0xdbf23ba71f865d1c3086a10b48f8faaa21ed0d689fdd14ec9ad988f8f013b5c3", + "sourceCodeHash": "0x048f592543a93c05085919f6a1670600ead00991e8370ae83fea1665ca09a5b4" }, "src/L1/OPContractsManagerStandardValidator.sol:OPContractsManagerStandardValidator": { "initCodeHash": "0xdec828fdb9f9bb7a35ca03d851b041fcd088681957642e949b5d320358d9b9a1", @@ -36,8 +36,8 @@ "sourceCodeHash": "0x16fb96f4d29a10d03b3b9c70edf56df51e97c2a1a3f0ba36aae79469b446ad5c" }, "src/L1/OptimismPortalInterop.sol:OptimismPortalInterop": { - "initCodeHash": "0x087281cd2a48e882648c09fa90bfcca7487d222e16300f9372deba6b2b8ccfad", - "sourceCodeHash": 
"0x1cc641a4272aea85e13cbf42d9032d1b91ef858eafe3be6b5649cc8504c9cf69" + "initCodeHash": "0xd361ed3b8d56dcc1f3c068ef3af9c83f3da1165bcdab097250ad4772f350c52e", + "sourceCodeHash": "0xd7d2166d29a22f3a051bc832cbce05f9ca06f1ac1bfb0790f29579f12bb95b8f" }, "src/L1/ProtocolVersions.sol:ProtocolVersions": { "initCodeHash": "0xcb59ad9a5ec2a0831b7f4daa74bdacba82ffa03035dafb499a732c641e017f4e", @@ -52,8 +52,8 @@ "sourceCodeHash": "0xb3184aa5d95a82109e7134d1f61941b30e25f655b9849a0e303d04bbce0cde0b" }, "src/L1/opcm/OPContractsManagerV2.sol:OPContractsManagerV2": { - "initCodeHash": "0x4d2822fdc1f81c51f843d027ccece5c4e847f7c5870bb068a05bd568f7354c22", - "sourceCodeHash": "0xdb47dbef330b9a8c6e644d5791ed3414796381d90b22078db682b14de30ed16d" + "initCodeHash": "0x426d53fd2634e081467a8d7bb266870fcdadeb0d23d3d50f47bd862f224c2028", + "sourceCodeHash": "0xaa1ea1b099c18edd712fcbde2aa35119b9c2953836e6a0f37d53253b0a1b1f4a" }, "src/L2/BaseFeeVault.sol:BaseFeeVault": { "initCodeHash": "0x838bbd7f381e84e21887f72bd1da605bfc4588b3c39aed96cbce67c09335b3ee", @@ -88,8 +88,8 @@ "sourceCodeHash": "0x34186bcab29963237b4e0d7575b0a1cff7caf42ccdb55d4b2b2c767db3279189" }, "src/L2/L1Withdrawer.sol:L1Withdrawer": { - "initCodeHash": "0x91e0be0d49636212678191c06b9b6840c399f08ad946bc7b52f24231691be28b", - "sourceCodeHash": "0x25422bdaf51d611c1688a835737368c0ff2ab639dac852af8a20ebb4e16fc103" + "initCodeHash": "0x6efb9055142e90b408c6312074243769df0d365f6f984e226e0320bec55a45b8", + "sourceCodeHash": "0x6a12e541b47b79f19d1061ff7b64ffdcffa1e8d06225cca6798daca53fd96890" }, "src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger": { "initCodeHash": "0xe160be403df12709c371c33195d1b9c3b5e9499e902e86bdabc8eed749c3fd61", @@ -176,8 +176,8 @@ "sourceCodeHash": "0x734a6b2aa6406bc145d848ad6071d3af1d40852aeb8f4b2f6f51beaad476e2d3" }, "src/cannon/MIPS64.sol:MIPS64": { - "initCodeHash": "0x6a649986370d18e5fddcd89df73e520063fb373f7dba2f731a2b7e79a1c132a5", - "sourceCodeHash": 
"0x657afae82e6e3627389153736e568bf99498a272ec6d9ecc22ecfd645c56c453" + "initCodeHash": "0x13196c1652a1f51cf0c16191f0092898f127eff036c773923c72b02a2823c7f4", + "sourceCodeHash": "0xd745aaf4ed265be7be7bff9bca1dd040e15dfe41e3a453906d72ca09a47f2c8b" }, "src/cannon/PreimageOracle.sol:PreimageOracle": { "initCodeHash": "0x6af5b0e83b455aab8d0946c160a4dc049a4e03be69f8a2a9e87b574f27b25a66", @@ -196,12 +196,12 @@ "sourceCodeHash": "0xf19216b7943479af87a01ab8935e68561853e8e333d09719c917228bc7a01a3a" }, "src/dispute/FaultDisputeGame.sol:FaultDisputeGame": { - "initCodeHash": "0xe7d3c982532946d196d7efadb9e2576c76b8f9e0d1f885ac36977d6f3fb72a65", - "sourceCodeHash": "0x63222e6926c8dd050d1adc0e65039c42382f269c3b0e113751d79e7a5167b7ac" + "initCodeHash": "0x57b01ba6873a49b3adafe58d05e0e0c4f342281181682f9f7ecd30752395b4ad", + "sourceCodeHash": "0x04111af652e4a059b591da704a7d7a15dcb46a75be05fd7cad6b88c1b7a1ac1b" }, "src/dispute/PermissionedDisputeGame.sol:PermissionedDisputeGame": { - "initCodeHash": "0xefa478f976e55eb53fcccf653b202bc2532781230f20013450ce0845b77d815c", - "sourceCodeHash": "0x335a503a4cc02dd30d88d163393680f3fd89168e0faa4fa4b0ae5da399656f91" + "initCodeHash": "0xcd7a262ac008a2de347e459902ca7039c1c980eb312106b9cc2c1f3190ae0840", + "sourceCodeHash": "0x618013c7ad9742f59445f355f7b26347ee1727c9e6616218a6f3443f1b4bb8e0" }, "src/dispute/SuperFaultDisputeGame.sol:SuperFaultDisputeGame": { "initCodeHash": "0xb5ce71bc56109055cd0dc71fc63015443bbdb29c5975e049802cd1b5188f06ca", @@ -212,16 +212,16 @@ "sourceCodeHash": "0x314b6e0412f698ce3531e8176ce8e5b8a3976cc3fa9d7ecb1f3278612f90ed4e" }, "src/dispute/v2/FaultDisputeGameV2.sol:FaultDisputeGameV2": { - "initCodeHash": "0x6fc59e2da083c9e2093e42b0fda705e8215cc216e4dcedbf728c08f69ec2d3bd", - "sourceCodeHash": "0x7fc97734c12e207f011c4f079fffe84f5bd11f4fb4a95dd56ad6a69df184584f" + "initCodeHash": "0x2806f9c9f0babb80be2a0c40382d265d598632dc6c1902db7f5f8f214d233f2f", + "sourceCodeHash": 
"0x1ac7a6aa4adafe2058046f06a425c662369f756a89be956b5a222647d99fabe8" }, "src/dispute/v2/PermissionedDisputeGameV2.sol:PermissionedDisputeGameV2": { - "initCodeHash": "0x9896fd04e9a3f9fe4f1d6e93eb298b37a6bfa33424aa705e68cc58d0ba7f3f90", - "sourceCodeHash": "0xc0ff6e93b6e2b9111c11e81b5df8948ab71d02b9d2c4dfda982fcb615519f1f7" + "initCodeHash": "0xfbb451f1a0bf22bb96242db527371dd0b0c3435208f9e074441ec0aacbf414bd", + "sourceCodeHash": "0x92bb886203246108435408762fab6e56fe223c2ed5ae85b5b792653cead4ec7a" }, - "src/dispute/zk/OPSuccinctFaultDisputeGame.sol:OPSuccinctFaultDisputeGame": { - "initCodeHash": "0xb9d0d9ca4df242f188b2d5be7d692459a12409a67a6504ef44ef589c6ca2c1a9", - "sourceCodeHash": "0x85f80adb845f59e9137d462e219c0cdba27058be77d855075e286aa316735aa0" + "src/dispute/zk/OptimisticZkGame.sol:OptimisticZkGame": { + "initCodeHash": "0x6eff352a513e3ce2ac5c53e4094985bf2ae1acad3992d73d6564c95aca3aebf1", + "sourceCodeHash": "0x998796b0286830629cd50eeb003eec571680cd171f4ad80bd5cad53aca756909" }, "src/legacy/DeployerWhitelist.sol:DeployerWhitelist": { "initCodeHash": "0x2e0ef4c341367eb59cc6c25190c64eff441d3fe130189da91d4d126f6bdbc9b5", @@ -263,10 +263,6 @@ "initCodeHash": "0x1fd4b84add5c5ed80205cea0bbca9115e98d0efb416d9cedc12ce0cff9919bda", "sourceCodeHash": "0xcfbaae5729ca367328ea546bbbe96194341586b2f4bfbd0cfa84acc09324d59b" }, - "src/vendor/asterisc/RISCV.sol:RISCV": { - "initCodeHash": "0x4cd639f7da4eaf86a98eb3227fe285c0e8380ff5c79c4745aefed804cef52162", - "sourceCodeHash": "0x1d18c55a910212cc7572d2e8673c5f092db8352dda1137739c71df18d4ee1db1" - }, "src/vendor/eas/EAS.sol:EAS": { "initCodeHash": "0xbd79d6fff128b3da3e09ead84b805b7540740190488f2791a6b4e5b7aabf9cff", "sourceCodeHash": "0x3512c3a1b5871341346f6646a04c0895dd563e9824f2ab7ab965b6a81a41ad2e" diff --git a/packages/contracts-bedrock/snapshots/storageLayout/AccessManager.json b/packages/contracts-bedrock/snapshots/storageLayout/AccessManager.json new file mode 100644 index 0000000000000..dbeafc3e6049b --- /dev/null +++ 
b/packages/contracts-bedrock/snapshots/storageLayout/AccessManager.json @@ -0,0 +1,23 @@ +[ + { + "bytes": "20", + "label": "_owner", + "offset": 0, + "slot": "0", + "type": "address" + }, + { + "bytes": "32", + "label": "proposers", + "offset": 0, + "slot": "1", + "type": "mapping(address => bool)" + }, + { + "bytes": "32", + "label": "challengers", + "offset": 0, + "slot": "2", + "type": "mapping(address => bool)" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerMigrator.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerMigrator.json new file mode 100644 index 0000000000000..0637a088a01e8 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerMigrator.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPSuccinctFaultDisputeGame.json b/packages/contracts-bedrock/snapshots/storageLayout/OptimisticZkGame.json similarity index 95% rename from packages/contracts-bedrock/snapshots/storageLayout/OPSuccinctFaultDisputeGame.json rename to packages/contracts-bedrock/snapshots/storageLayout/OptimisticZkGame.json index d50456001fd46..34ce5c1cfc3ef 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPSuccinctFaultDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OptimisticZkGame.json @@ -32,7 +32,7 @@ "label": "claimData", "offset": 0, "slot": "1", - "type": "struct OPSuccinctFaultDisputeGame.ClaimData" + "type": "struct OptimisticZkGame.ClaimData" }, { "bytes": "32", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/RISCV.json b/packages/contracts-bedrock/snapshots/storageLayout/RISCV.json deleted file mode 100644 index a79dc13a1d368..0000000000000 --- a/packages/contracts-bedrock/snapshots/storageLayout/RISCV.json +++ /dev/null @@ -1,9 +0,0 @@ -[ - { - "bytes": "20", - "label": "oracle", - "offset": 0, - "slot": "0", - "type": 
"contract IPreimageOracle" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index b35ee3d98a473..85332af95321a 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -1977,9 +1977,9 @@ contract OPContractsManager is ISemver { // -------- Constants and Variables -------- - /// @custom:semver 6.0.0 + /// @custom:semver 6.0.1 function version() public pure virtual returns (string memory) { - return "6.0.0"; + return "6.0.1"; } OPContractsManagerGameTypeAdder public immutable opcmGameTypeAdder; @@ -2046,6 +2046,9 @@ contract OPContractsManager is ISemver { /// @notice Thrown if logic gated by a dev feature flag is incorrectly accessed. error InvalidDevFeatureAccess(bytes32 devFeature); + /// @notice Thrown when OPCM v2 is enabled via dev feature flag. + error OPContractsManager_V2Enabled(); + // -------- Methods -------- constructor( @@ -2130,6 +2133,8 @@ contract OPContractsManager is ISemver { /// @param _input The deploy input parameters for the deployment. /// @return The deploy output values of the deployment. function deploy(DeployInput calldata _input) external virtual returns (DeployOutput memory) { + _assertV2NotEnabled(); + return opcmDeployer.deploy(_input, superchainConfig, msg.sender); } @@ -2139,6 +2144,8 @@ contract OPContractsManager is ISemver { /// `_opChainConfigs`'s ProxyAdmin. /// @dev This function requires that each chain's superchainConfig is already upgraded. 
function upgrade(OpChainConfig[] memory _opChainConfigs) external virtual { + _assertV2NotEnabled(); + if (address(this) == address(thisOPCM)) revert OnlyDelegatecall(); bytes memory data = abi.encodeCall(OPContractsManagerUpgrader.upgrade, (_opChainConfigs)); @@ -2150,6 +2157,8 @@ contract OPContractsManager is ISemver { /// @dev This function is intended to be DELEGATECALLed by the superchainConfig's ProxyAdminOwner. /// @dev This function will revert if the SuperchainConfig is already at or above the target version. function upgradeSuperchainConfig(ISuperchainConfig _superchainConfig) external { + _assertV2NotEnabled(); + if (address(this) == address(thisOPCM)) revert OnlyDelegatecall(); bytes memory data = abi.encodeCall(OPContractsManagerUpgrader.upgradeSuperchainConfig, (_superchainConfig)); @@ -2159,6 +2168,8 @@ contract OPContractsManager is ISemver { /// @notice addGameType deploys a new dispute game and links it to the DisputeGameFactory. The inputted _gameConfigs /// must be added in ascending GameType order. function addGameType(AddGameInput[] memory _gameConfigs) public virtual returns (AddGameOutput[] memory) { + _assertV2NotEnabled(); + if (address(this) == address(thisOPCM)) revert OnlyDelegatecall(); bytes memory data = abi.encodeCall(OPContractsManagerGameTypeAdder.addGameType, (_gameConfigs)); @@ -2170,6 +2181,8 @@ contract OPContractsManager is ISemver { /// @notice Updates the prestate hash for dispute games while keeping all other parameters the same /// @param _prestateUpdateInputs The new prestate hashes to use function updatePrestate(UpdatePrestateInput[] memory _prestateUpdateInputs) public { + _assertV2NotEnabled(); + if (address(this) == address(thisOPCM)) revert OnlyDelegatecall(); bytes memory data = abi.encodeCall(OPContractsManagerGameTypeAdder.updatePrestate, (_prestateUpdateInputs)); @@ -2180,6 +2193,8 @@ contract OPContractsManager is ISemver { /// @notice Migrates the Optimism contracts to the latest version. 
/// @param _input Input parameters for the migration. function migrate(OPContractsManagerInteropMigrator.MigrateInput calldata _input) external virtual { + _assertV2NotEnabled(); + if (address(this) == address(thisOPCM)) revert OnlyDelegatecall(); bytes memory data = abi.encodeCall(OPContractsManagerInteropMigrator.migrate, (_input)); @@ -2220,6 +2235,13 @@ contract OPContractsManager is ISemver { return opcmDeployer.isDevFeatureEnabled(_feature); } + /// @notice Reverts if the dev feature flag for OPCM v2 is enabled. + function _assertV2NotEnabled() internal view { + if (isDevFeatureEnabled(DevFeatures.OPCM_V2)) { + revert OPContractsManager_V2Enabled(); + } + } + /// @notice Helper function to perform a delegatecall to a target contract /// @param _target The target contract address /// @param _data The calldata to send to the target diff --git a/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol b/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol index 54db98d889f8f..dec588d5cc11f 100644 --- a/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol +++ b/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol @@ -229,9 +229,9 @@ contract OptimismPortalInterop is Initializable, ResourceMetering, Reinitializab error OptimismPortal_MigratingToSameRegistry(); /// @notice Semantic version. - /// @custom:semver 5.1.0+interop + /// @custom:semver 5.2.0+interop function version() public pure virtual returns (string memory) { - return "5.1.0+interop"; + return "5.2.0+interop"; } /// @param _proofMaturityDelaySeconds The proof maturity delay in seconds. 
diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol new file mode 100644 index 0000000000000..111274613be1d --- /dev/null +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Contracts +import { OPContractsManagerUtilsCaller } from "src/L1/opcm/OPContractsManagerUtilsCaller.sol"; + +// Libraries +import { GameTypes } from "src/dispute/lib/Types.sol"; +import { Constants } from "src/libraries/Constants.sol"; +import { Features } from "src/libraries/Features.sol"; + +// Interfaces +import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IOptimismPortal2 as IOptimismPortal } from "interfaces/L1/IOptimismPortal2.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; +import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; +import { IOPContractsManagerContainer } from "interfaces/L1/opcm/IOPContractsManagerContainer.sol"; +import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; +import { GameType, Proposal } from "src/dispute/lib/Types.sol"; + +/// @title OPContractsManagerMigrator +/// @notice OPContractsManagerMigrator is a contract that provides the migration functionality for +/// migrating one or more OP Stack chains to use the Super Root dispute games and shared +/// dispute game contracts. 
+contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { + /// @notice Input for migrating one or more OP Stack chains to use the Super Root dispute games + /// and shared dispute game contracts. + struct MigrateInput { + ISystemConfig[] chainSystemConfigs; + IOPContractsManagerUtils.DisputeGameConfig[] disputeGameConfigs; + Proposal startingAnchorRoot; + GameType startingRespectedGameType; + } + + /// @notice Thrown when a chain's ProxyAdmin owner does not match the other chains. + error OPContractsManagerMigrator_ProxyAdminOwnerMismatch(); + + /// @notice Thrown when a chain's SuperchainConfig does not match the other chains. + error OPContractsManagerMigrator_SuperchainConfigMismatch(); + + /// @notice Thrown when the starting respected game type is not a valid super game type. + error OPContractsManagerMigrator_InvalidStartingRespectedGameType(); + + /// @notice Container of blueprint and implementation contract addresses. + IOPContractsManagerContainer public immutable contractsContainer; + + /// @param _contractsContainer The container of blueprint and implementation contract addresses. + /// @param _utils The utility functions for the OPContractsManager. + constructor( + IOPContractsManagerContainer _contractsContainer, + IOPContractsManagerUtils _utils + ) + OPContractsManagerUtilsCaller(_utils) + { + contractsContainer = _contractsContainer; + } + + /// @notice Migrates one or more OP Stack chains to use the Super Root dispute games and shared + /// dispute game contracts. + /// @dev WARNING: This is a one-way operation. You cannot easily undo this operation without a + /// smart contract upgrade. Do not call this function unless you are 100% confident that + /// you know what you're doing and that you are prepared to fully execute this migration. + /// @dev NOTE: Unlike other functions in OPCM, this is a one-off function used to serve the + /// temporary need to support the interop migration action. 
It will likely be removed in + /// the near future once interop support is baked more directly into OPCM. It does NOT + /// look or function like all of the other functions in OPCMv2. + /// @param _input The input parameters for the migration. + function migrate(MigrateInput calldata _input) public { + // Check that the starting respected game type is a valid super game type. + if ( + _input.startingRespectedGameType.raw() != GameTypes.SUPER_CANNON.raw() + && _input.startingRespectedGameType.raw() != GameTypes.SUPER_PERMISSIONED_CANNON.raw() + ) { + revert OPContractsManagerMigrator_InvalidStartingRespectedGameType(); + } + + // Check that all of the chains have the same core contracts. + for (uint256 i = 0; i < _input.chainSystemConfigs.length; i++) { + // Different chains might actually have different ProxyAdmin contracts, but it's fine + // as long as the owner of all of those contracts is the same. + if (_input.chainSystemConfigs[i].proxyAdmin().owner() != _input.chainSystemConfigs[0].proxyAdmin().owner()) + { + revert OPContractsManagerMigrator_ProxyAdminOwnerMismatch(); + } + + // Each chain must have the same SuperchainConfig. + if (_input.chainSystemConfigs[i].superchainConfig() != _input.chainSystemConfigs[0].superchainConfig()) { + revert OPContractsManagerMigrator_SuperchainConfigMismatch(); + } + } + + // NOTE: Interop doesn't have a real chain ID, and the chain ID provided here is ONLY used + // as a salt mixer, so we just use the block.timestamp instead. It really doesn't matter + // what we use here. + IOPContractsManagerUtils.ProxyDeployArgs memory proxyDeployArgs = IOPContractsManagerUtils.ProxyDeployArgs({ + proxyAdmin: _input.chainSystemConfigs[0].proxyAdmin(), + addressManager: IAddressManager(address(0)), // AddressManager NOT needed for these proxies. + l2ChainId: block.timestamp, + saltMixer: "interop salt mixer" + }); + + // Set up the extra instructions to allow all proxy deployments. 
+ IOPContractsManagerUtils.ExtraInstruction[] memory extraInstructions = + new IOPContractsManagerUtils.ExtraInstruction[](1); + extraInstructions[0] = IOPContractsManagerUtils.ExtraInstruction({ + key: Constants.PERMITTED_PROXY_DEPLOYMENT_KEY, + data: bytes(Constants.PERMIT_ALL_CONTRACTS_INSTRUCTION) + }); + + // Deploy the new ETHLockbox. + IETHLockbox ethLockbox = IETHLockbox( + _loadOrDeployProxy( + address(0), // Source from address(0) so we always deploy a new proxy. + bytes4(0), + proxyDeployArgs, + "ETHLockbox", + extraInstructions + ) + ); + + // Deploy the new DisputeGameFactory. + IDisputeGameFactory disputeGameFactory = IDisputeGameFactory( + _loadOrDeployProxy( + address(0), // Source from address(0) so we always deploy a new proxy. + bytes4(0), + proxyDeployArgs, + "DisputeGameFactory", + extraInstructions + ) + ); + + // Deploy the new AnchorStateRegistry. + IAnchorStateRegistry anchorStateRegistry = IAnchorStateRegistry( + _loadOrDeployProxy( + address(0), // Source from address(0) so we always deploy a new proxy. + bytes4(0), + proxyDeployArgs, + "AnchorStateRegistry", + extraInstructions + ) + ); + + // Deploy the new DelayedWETH. + IDelayedWETH delayedWETH = IDelayedWETH( + _loadOrDeployProxy( + address(0), // Source from address(0) so we always deploy a new proxy. + bytes4(0), + proxyDeployArgs, + "DelayedWETH", + extraInstructions + ) + ); + + // Separate context to avoid stack too deep (isolate the implementations variable). + { + // Grab the implementations. + IOPContractsManagerContainer.Implementations memory impls = contractsContainer.implementations(); + + // Initialize the new ETHLockbox. + _upgrade( + proxyDeployArgs.proxyAdmin, + address(ethLockbox), + impls.ethLockboxImpl, + abi.encodeCall(IETHLockbox.initialize, (_input.chainSystemConfigs[0], new IOptimismPortal[](0))) + ); + + // Initialize the new DisputeGameFactory. 
+ _upgrade( + proxyDeployArgs.proxyAdmin, + address(disputeGameFactory), + impls.disputeGameFactoryImpl, + abi.encodeCall(IDisputeGameFactory.initialize, (proxyDeployArgs.proxyAdmin.owner())) + ); + + // Initialize the new AnchorStateRegistry. + _upgrade( + proxyDeployArgs.proxyAdmin, + address(anchorStateRegistry), + impls.anchorStateRegistryImpl, + abi.encodeCall( + IAnchorStateRegistry.initialize, + ( + _input.chainSystemConfigs[0], + disputeGameFactory, + _input.startingAnchorRoot, + _input.startingRespectedGameType + ) + ) + ); + + // Initialize the new DelayedWETH. + _upgrade( + proxyDeployArgs.proxyAdmin, + address(delayedWETH), + impls.delayedWETHImpl, + abi.encodeCall(IDelayedWETH.initialize, (_input.chainSystemConfigs[0])) + ); + + // Migrate each portal to the new ETHLockbox and AnchorStateRegistry. + for (uint256 i = 0; i < _input.chainSystemConfigs.length; i++) { + _migratePortal(_input.chainSystemConfigs[i], ethLockbox, anchorStateRegistry); + } + } + + // Set up the dispute games in the new DisputeGameFactory. + for (uint256 i = 0; i < _input.disputeGameConfigs.length; i++) { + disputeGameFactory.setImplementation( + _input.disputeGameConfigs[i].gameType, + _getGameImpl(_input.disputeGameConfigs[i].gameType), + _makeGameArgs(0, anchorStateRegistry, delayedWETH, _input.disputeGameConfigs[i]) + ); + disputeGameFactory.setInitBond(_input.disputeGameConfigs[i].gameType, _input.disputeGameConfigs[i].initBond); + } + } + + /// @notice Migrates a single portal to the new ETHLockbox and AnchorStateRegistry. + /// @param _systemConfig The system config for the chain being migrated. + /// @param _newLockbox The new ETHLockbox. + /// @param _newASR The new AnchorStateRegistry. + function _migratePortal( + ISystemConfig _systemConfig, + IETHLockbox _newLockbox, + IAnchorStateRegistry _newASR + ) + internal + { + // Convert portal to interop portal interface, and grab existing ETHLockbox and DGF. 
+ IOptimismPortalInterop portal = IOptimismPortalInterop(payable(_systemConfig.optimismPortal())); + IETHLockbox existingLockbox = IETHLockbox(payable(address(portal.ethLockbox()))); + IDisputeGameFactory existingDGF = IDisputeGameFactory(payable(address(portal.disputeGameFactory()))); + + // Authorize the portal on the new ETHLockbox. + _newLockbox.authorizePortal(IOptimismPortal(payable(address(portal)))); + + // Authorize the existing ETHLockbox to use the new ETHLockbox. + _newLockbox.authorizeLockbox(existingLockbox); + + // Migrate the existing ETHLockbox to the new ETHLockbox. + existingLockbox.migrateLiquidity(_newLockbox); + + // Clear out any implementations that might exist in the old DisputeGameFactory proxy. + // We clear out all potential game types to be safe. + existingDGF.setImplementation(GameTypes.CANNON, IDisputeGame(address(0)), hex""); + existingDGF.setImplementation(GameTypes.SUPER_CANNON, IDisputeGame(address(0)), hex""); + existingDGF.setImplementation(GameTypes.PERMISSIONED_CANNON, IDisputeGame(address(0)), hex""); + existingDGF.setImplementation(GameTypes.SUPER_PERMISSIONED_CANNON, IDisputeGame(address(0)), hex""); + existingDGF.setImplementation(GameTypes.CANNON_KONA, IDisputeGame(address(0)), hex""); + existingDGF.setImplementation(GameTypes.SUPER_CANNON_KONA, IDisputeGame(address(0)), hex""); + + // Enable the ETH lockbox feature on the SystemConfig if not already enabled. + // This is needed for the SystemConfig's paused() function to use the correct identifier. + if (!_systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX)) { + _systemConfig.setFeature(Features.ETH_LOCKBOX, true); + } + + // Migrate the portal to the new ETHLockbox and AnchorStateRegistry. + // This also sets superRootsActive = true. + // NOTE: This requires the portal to already be upgraded to the interop version + // (OptimismPortalInterop). If the portal is not on the interop version, this call will + // fail. 
+ portal.migrateToSuperRoots(_newLockbox, _newASR); + } +} diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol index 6aad38d7520c3..246b740103ef3 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol @@ -6,13 +6,18 @@ import { LibString } from "@solady/utils/LibString.sol"; import { SemverComp } from "src/libraries/SemverComp.sol"; import { Blueprint } from "src/libraries/Blueprint.sol"; import { Constants } from "src/libraries/Constants.sol"; +import { GameType, GameTypes } from "src/dispute/lib/Types.sol"; // Interfaces import { IOPContractsManagerContainer } from "interfaces/L1/opcm/IOPContractsManagerContainer.sol"; +import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; import { IStorageSetter } from "interfaces/universal/IStorageSetter.sol"; import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; /// @title OPContractsManagerUtils /// @notice OPContractsManagerUtils is a contract that provides utility functions for the OPContractsManager. @@ -226,8 +231,8 @@ contract OPContractsManagerUtils { // Try to load the proxy from the source. (bool success, bytes memory result) = address(_source).staticcall(abi.encodePacked(_selector)); - // If the load succeeded and the result is not a zero address, return the result. 
- if (success && abi.decode(result, (address)) != address(0)) { + // If the load succeeded, returned valid data, and the result is not a zero address, return the result. + if (success && result.length >= 32 && abi.decode(result, (address)) != address(0)) { return payable(abi.decode(result, (address))); } else if (!loadCanFail) { // Load not permitted to fail but did, revert. @@ -329,4 +334,77 @@ contract OPContractsManagerUtils { function blueprints() public view returns (IOPContractsManagerContainer.Blueprints memory) { return contractsContainer.blueprints(); } + + /// @notice Helper for retrieving the dispute game implementation for a given game type. + /// @param _gameType The game type to retrieve the implementation for. + /// @return The dispute game implementation. + function getGameImpl(GameType _gameType) public view returns (IDisputeGame) { + IOPContractsManagerContainer.Implementations memory impls = implementations(); + if (_gameType.raw() == GameTypes.CANNON.raw()) { + return IDisputeGame(impls.faultDisputeGameV2Impl); + } else if (_gameType.raw() == GameTypes.PERMISSIONED_CANNON.raw()) { + return IDisputeGame(impls.permissionedDisputeGameV2Impl); + } else if (_gameType.raw() == GameTypes.CANNON_KONA.raw()) { + return IDisputeGame(impls.faultDisputeGameV2Impl); + } else if (_gameType.raw() == GameTypes.SUPER_CANNON.raw()) { + return IDisputeGame(impls.superFaultDisputeGameImpl); + } else if (_gameType.raw() == GameTypes.SUPER_PERMISSIONED_CANNON.raw()) { + return IDisputeGame(impls.superPermissionedDisputeGameImpl); + } else if (_gameType.raw() == GameTypes.SUPER_CANNON_KONA.raw()) { + return IDisputeGame(impls.superFaultDisputeGameImpl); + } else { + revert IOPContractsManagerUtils.OPContractsManagerUtils_UnsupportedGameType(); + } + } + + /// @notice Helper for creating game constructor arguments. + /// @param _l2ChainId The L2 chain ID. + /// @param _anchorStateRegistry The AnchorStateRegistry to use for dispute games. 
+ /// @param _delayedWETH The DelayedWETH to use for dispute games. + /// @param _gcfg Configuration for the dispute game to create. + /// @return The game constructor arguments. + function makeGameArgs( + uint256 _l2ChainId, + IAnchorStateRegistry _anchorStateRegistry, + IDelayedWETH _delayedWETH, + IOPContractsManagerUtils.DisputeGameConfig memory _gcfg + ) + public + view + returns (bytes memory) + { + IOPContractsManagerContainer.Implementations memory impls = implementations(); + if ( + _gcfg.gameType.raw() == GameTypes.CANNON.raw() || _gcfg.gameType.raw() == GameTypes.CANNON_KONA.raw() + || _gcfg.gameType.raw() == GameTypes.SUPER_CANNON.raw() + || _gcfg.gameType.raw() == GameTypes.SUPER_CANNON_KONA.raw() + ) { + IOPContractsManagerUtils.FaultDisputeGameConfig memory parsedInputArgs = + abi.decode(_gcfg.gameArgs, (IOPContractsManagerUtils.FaultDisputeGameConfig)); + return abi.encodePacked( + parsedInputArgs.absolutePrestate, + impls.mipsImpl, + address(_anchorStateRegistry), + address(_delayedWETH), + _l2ChainId + ); + } else if ( + _gcfg.gameType.raw() == GameTypes.PERMISSIONED_CANNON.raw() + || _gcfg.gameType.raw() == GameTypes.SUPER_PERMISSIONED_CANNON.raw() + ) { + IOPContractsManagerUtils.PermissionedDisputeGameConfig memory parsedInputArgs = + abi.decode(_gcfg.gameArgs, (IOPContractsManagerUtils.PermissionedDisputeGameConfig)); + return abi.encodePacked( + parsedInputArgs.absolutePrestate, + impls.mipsImpl, + address(_anchorStateRegistry), + address(_delayedWETH), + _l2ChainId, + parsedInputArgs.proposer, + parsedInputArgs.challenger + ); + } else { + revert IOPContractsManagerUtils.OPContractsManagerUtils_UnsupportedGameType(); + } + } } diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtilsCaller.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtilsCaller.sol index 22e4ce10ce1fd..63eb3bdb8ee10 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtilsCaller.sol +++ 
b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtilsCaller.sol @@ -1,9 +1,15 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// Libraries +import { GameType } from "src/dispute/lib/Types.sol"; + // Interfaces import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; /// @title OPContractsManagerUtilsCaller /// @notice OPContractsManagerUtilsCaller is an abstract contract that exists to hide all of the @@ -206,4 +212,38 @@ abstract contract OPContractsManagerUtilsCaller { } return result; } + + /// @notice Helper for retrieving the dispute game implementation for a given game type. + /// @param _gameType The game type to retrieve the implementation for. + /// @return The dispute game implementation. + function _getGameImpl(GameType _gameType) internal view returns (IDisputeGame) { + return + abi.decode(_staticcall(abi.encodeCall(IOPContractsManagerUtils.getGameImpl, (_gameType))), (IDisputeGame)); + } + + /// @notice Helper for creating game constructor arguments. + /// @param _l2ChainId The L2 chain ID. + /// @param _anchorStateRegistry The AnchorStateRegistry to use for dispute games. + /// @param _delayedWETH The DelayedWETH to use for dispute games. + /// @param _gcfg Configuration for the dispute game to create. + /// @return The game constructor arguments. 
+ function _makeGameArgs( + uint256 _l2ChainId, + IAnchorStateRegistry _anchorStateRegistry, + IDelayedWETH _delayedWETH, + IOPContractsManagerUtils.DisputeGameConfig memory _gcfg + ) + internal + view + returns (bytes memory) + { + return abi.decode( + _staticcall( + abi.encodeCall( + IOPContractsManagerUtils.makeGameArgs, (_l2ChainId, _anchorStateRegistry, _delayedWETH, _gcfg) + ) + ), + (bytes) + ); + } } diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol index 3b937106b27b8..d868339fc2c92 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol @@ -3,10 +3,11 @@ pragma solidity 0.8.15; // Contracts import { OPContractsManagerUtilsCaller } from "src/L1/opcm/OPContractsManagerUtilsCaller.sol"; +import { IOPContractsManagerMigrator } from "interfaces/L1/opcm/IOPContractsManagerMigrator.sol"; // Libraries import { Blueprint } from "src/libraries/Blueprint.sol"; -import { Claim, GameType, GameTypes, Proposal } from "src/dispute/lib/Types.sol"; +import { GameType, GameTypes, Proposal } from "src/dispute/lib/Types.sol"; import { SemverComp } from "src/libraries/SemverComp.sol"; import { Features } from "src/libraries/Features.sol"; import { DevFeatures } from "src/libraries/DevFeatures.sol"; @@ -54,26 +55,6 @@ import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManager /// design. Look at _apply, squint, and imagine that it can output an upgrade plan rather than /// actually executing the upgrade, and then you'll see how it can be improved. contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { - /// @notice Configuration struct for the FaultDisputeGame. - struct FaultDisputeGameConfig { - Claim absolutePrestate; - } - - /// @notice Configuration struct for the PermissionedDisputeGame. 
- struct PermissionedDisputeGameConfig { - Claim absolutePrestate; - address proposer; - address challenger; - } - - /// @notice Generic dispute game configuration data. - struct DisputeGameConfig { - bool enabled; - uint256 initBond; - GameType gameType; - bytes gameArgs; - } - /// @notice Contracts that represent the Superchain system. struct SuperchainContracts { ISuperchainConfig superchainConfig; @@ -115,7 +96,7 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { uint256 l2ChainId; IResourceMetering.ResourceConfig resourceConfig; // Dispute game configuration. - DisputeGameConfig[] disputeGameConfigs; + IOPContractsManagerUtils.DisputeGameConfig[] disputeGameConfigs; // CGT bool useCustomGasToken; } @@ -123,7 +104,7 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// @notice Partial input required for an upgrade. struct UpgradeInput { ISystemConfig systemConfig; - DisputeGameConfig[] disputeGameConfigs; + IOPContractsManagerUtils.DisputeGameConfig[] disputeGameConfigs; IOPContractsManagerUtils.ExtraInstruction[] extraInstructions; } @@ -136,9 +117,6 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// @notice Thrown when the SuperchainConfig needs to be upgraded. error OPContractsManagerV2_SuperchainConfigNeedsUpgrade(); - /// @notice Thrown when an unsupported game type is provided. - error OPContractsManagerV2_UnsupportedGameType(); - /// @notice Thrown when an invalid game config is provided. error OPContractsManagerV2_InvalidGameConfigs(); @@ -160,6 +138,9 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// @notice Address of the Standard Validator for this OPCM release. IOPContractsManagerStandardValidator public immutable opcmStandardValidator; + /// @notice Address of the Migrator contract for this OPCM release. 
+ IOPContractsManagerMigrator public immutable opcmMigrator; + /// @notice Immutable reference to this OPCM contract so that the address of this contract can /// be used when this contract is DELEGATECALLed. OPContractsManagerV2 public immutable opcmV2; @@ -169,23 +150,26 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// - Major bump: New required sequential upgrade /// - Minor bump: Replacement OPCM for same upgrade /// - Patch bump: Development changes (expected for normal dev work) - /// @custom:semver 7.0.1 + /// @custom:semver 7.0.2 function version() public pure returns (string memory) { - return "7.0.1"; + return "7.0.2"; } /// @param _contractsContainer The container of blueprint and implementation contract addresses. /// @param _standardValidator The standard validator for this OPCM release. + /// @param _migrator The migrator contract for this OPCM release. /// @param _utils The utility functions for the OPContractsManager. constructor( IOPContractsManagerContainer _contractsContainer, IOPContractsManagerStandardValidator _standardValidator, + IOPContractsManagerMigrator _migrator, IOPContractsManagerUtils _utils ) OPContractsManagerUtilsCaller(_utils) { contractsContainer = _contractsContainer; opcmStandardValidator = _standardValidator; + opcmMigrator = _migrator; opcmV2 = this; } @@ -270,6 +254,27 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { return _apply(cfg, cts, false); } + /// @notice Migrates one or more OP Stack chains to use the Super Root dispute games and shared + /// dispute game contracts. + /// @dev WARNING: This is a one-way operation. You cannot easily undo this operation without a + /// smart contract upgrade. Do not call this function unless you are 100% confident that + /// you know what you're doing and that you are prepared to fully execute this migration. 
+ /// @dev NOTE: Unlike other functions in OPCM, this is a one-off function used to serve the + /// temporary need to support the interop migration action. It will likely be removed in + /// the near future once interop support is baked more directly into OPCM. It does NOT + /// look or function like all of the other functions in OPCMv2. + /// @param _input The input parameters for the migration. + function migrate(IOPContractsManagerMigrator.MigrateInput calldata _input) public { + // Delegatecall to the migrator contract. + (bool success, bytes memory result) = + address(opcmMigrator).delegatecall(abi.encodeCall(IOPContractsManagerMigrator.migrate, (_input))); + if (!success) { + assembly { + revert(add(result, 0x20), mload(result)) + } + } + } + /////////////////////////////////////////////////////////////////////////// // INTERNAL CHAIN MANAGEMENT FUNCTIONS // /////////////////////////////////////////////////////////////////////////// @@ -327,7 +332,6 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// @param _saltMixer The salt mixer for creating new proxies if needed. /// @param _extraInstructions The extra upgrade instructions for the chain. /// @return The chain contracts. - function _loadChainContracts( ISystemConfig _systemConfig, uint256 _l2ChainId, @@ -840,7 +844,9 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { // If the game is enabled, grab the implementation and craft the game arguments. if (_cfg.disputeGameConfigs[i].enabled) { gameImpl = _getGameImpl(_cfg.disputeGameConfigs[i].gameType); - gameArgs = _makeGameArgs(_cfg, _cts, _cfg.disputeGameConfigs[i]); + gameArgs = _makeGameArgs( + _cfg.l2ChainId, _cts.anchorStateRegistry, _cts.delayedWETH, _cfg.disputeGameConfigs[i] + ); } // Set the game implementation and arguments. @@ -927,67 +933,6 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { ); } - /// @notice Helper for retrieving dispute game implementations. 
- /// @param _gameType The game type to retrieve the implementation for. - /// @return The dispute game implementation. - function _getGameImpl(GameType _gameType) internal view returns (IDisputeGame) { - IOPContractsManagerContainer.Implementations memory impls = implementations(); - if (_gameType.raw() == GameTypes.CANNON.raw()) { - return IDisputeGame(impls.faultDisputeGameV2Impl); - } else if (_gameType.raw() == GameTypes.PERMISSIONED_CANNON.raw()) { - return IDisputeGame(impls.permissionedDisputeGameV2Impl); - } else if (_gameType.raw() == GameTypes.CANNON_KONA.raw()) { - return IDisputeGame(impls.faultDisputeGameV2Impl); - } else { - // Since we assert in _assertValidFullConfig that we only have valid configs, this - // should never happen, but we'll be defensive and revert if it does. - revert OPContractsManagerV2_UnsupportedGameType(); - } - } - - /// @notice Helper for creating game constructor arguments. - /// @param _cfg Full chain config. - /// @param _cts Chain contracts. - /// @param _gcfg Configuration for the dispute game to create. - /// @return The game constructor arguments. 
- function _makeGameArgs( - FullConfig memory _cfg, - ChainContracts memory _cts, - DisputeGameConfig memory _gcfg - ) - internal - view - returns (bytes memory) - { - IOPContractsManagerContainer.Implementations memory impls = implementations(); - if (_gcfg.gameType.raw() == GameTypes.CANNON.raw() || _gcfg.gameType.raw() == GameTypes.CANNON_KONA.raw()) { - FaultDisputeGameConfig memory parsedInputArgs = abi.decode(_gcfg.gameArgs, (FaultDisputeGameConfig)); - return abi.encodePacked( - parsedInputArgs.absolutePrestate, - impls.mipsImpl, - address(_cts.anchorStateRegistry), - address(_cts.delayedWETH), - _cfg.l2ChainId - ); - } else if (_gcfg.gameType.raw() == GameTypes.PERMISSIONED_CANNON.raw()) { - PermissionedDisputeGameConfig memory parsedInputArgs = - abi.decode(_gcfg.gameArgs, (PermissionedDisputeGameConfig)); - return abi.encodePacked( - parsedInputArgs.absolutePrestate, - impls.mipsImpl, - address(_cts.anchorStateRegistry), - address(_cts.delayedWETH), - _cfg.l2ChainId, - parsedInputArgs.proposer, - parsedInputArgs.challenger - ); - } else { - // Since we assert in _assertValidFullConfig that we only have valid configs, this - // should never happen, but we'll be defensive and revert if it does. - revert OPContractsManagerV2_UnsupportedGameType(); - } - } - /////////////////////////////////////////////////////////////////////////// // PUBLIC UTILITY FUNCTIONS // /////////////////////////////////////////////////////////////////////////// diff --git a/packages/contracts-bedrock/src/L2/L1Withdrawer.sol b/packages/contracts-bedrock/src/L2/L1Withdrawer.sol index bbaf18e46389e..8b55fdea67ed4 100644 --- a/packages/contracts-bedrock/src/L2/L1Withdrawer.sol +++ b/packages/contracts-bedrock/src/L2/L1Withdrawer.sol @@ -51,14 +51,14 @@ contract L1Withdrawer is ISemver { event WithdrawalGasLimitUpdated(uint32 oldWithdrawalGasLimit, uint32 newWithdrawalGasLimit); /// @notice Semantic version. 
- /// @custom:semver 1.0.0 - string public constant version = "1.0.0"; + /// @custom:semver 1.0.1 + string public constant version = "1.0.1"; /// @notice Constructs the L1Withdrawer contract. /// @param _minWithdrawalAmount The minimum amount of ETH required to trigger a withdrawal. /// @param _recipient The L1 address that will receive withdrawals. /// @param _withdrawalGasLimit The gas limit for the L1 withdrawal transaction. - /// @dev If target on L1 is `FeesDepositor`, the gas limit should be above 800k gas. + /// @dev If target on L1 is `FeesDepositor`, the gas limit should be at or above 800k gas. constructor(uint256 _minWithdrawalAmount, address _recipient, uint32 _withdrawalGasLimit) { minWithdrawalAmount = _minWithdrawalAmount; recipient = _recipient; @@ -105,7 +105,7 @@ contract L1Withdrawer is ISemver { /// @notice Updates the withdrawal gas limit. Only callable by the ProxyAdmin owner. /// @param _newWithdrawalGasLimit The new withdrawal gas limit. - /// @dev If target on L1 is `FeesDepositor`, the gas limit should be above 800k gas. + /// @dev If target on L1 is `FeesDepositor`, the gas limit should be at or above 800k gas. function setWithdrawalGasLimit(uint32 _newWithdrawalGasLimit) external { if (msg.sender != IProxyAdmin(Predeploys.PROXY_ADMIN).owner()) { revert L1Withdrawer_OnlyProxyAdminOwner(); diff --git a/packages/contracts-bedrock/src/cannon/MIPS64.sol b/packages/contracts-bedrock/src/cannon/MIPS64.sol index c00c705f4a610..644f478ce0225 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS64.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS64.sol @@ -66,8 +66,8 @@ contract MIPS64 is ISemver { } /// @notice The semantic version of the MIPS64 contract. - /// @custom:semver 1.9.0 - string public constant version = "1.9.0"; + /// @custom:semver 1.10.0 + string public constant version = "1.10.0"; /// @notice The preimage oracle contract. 
IPreimageOracle internal immutable ORACLE; @@ -92,8 +92,8 @@ contract MIPS64 is ISemver { /// @param _oracle The address of the preimage oracle contract. constructor(IPreimageOracle _oracle, uint256 _stateVersion) { - // Supports VersionMultiThreaded64_v4 (7) and VersionMultiThreaded64_v5 (8) - if (_stateVersion != 7 && _stateVersion != 8) { + // Supports VersionMultiThreaded64_v5 (8) + if (_stateVersion != 8) { revert UnsupportedStateVersion(); } ORACLE = _oracle; @@ -560,10 +560,7 @@ contract MIPS64 is ISemver { v0 = 0; v1 = 0; } else if (syscall_no == sys.SYS_GETRANDOM) { - if (st.featuresForVersion(STATE_VERSION).supportWorkingSysGetRandom) { - (v0, v1, state.memRoot) = syscallGetRandom(state, a0, a1); - } - // Otherwise, ignored (noop) + (v0, v1, state.memRoot) = syscallGetRandom(state, a0, a1); } else if (syscall_no == sys.SYS_MUNMAP) { // ignored } else if (syscall_no == sys.SYS_MPROTECT) { diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol index c19d1f66a0005..c7102dea0fdd7 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol @@ -12,19 +12,9 @@ library MIPS64State { uint64 hi; } - struct Features { - bool supportWorkingSysGetRandom; - } - function assertExitedIsValid(uint32 _exited) internal pure { if (_exited > 1) { revert InvalidExitedValue(); } } - - function featuresForVersion(uint256 _version) internal pure returns (Features memory features_) { - if (_version >= 8) { - features_.supportWorkingSysGetRandom = true; - } - } } diff --git a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol index 8689221a03a8a..97f4e0506ec35 100644 --- a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol @@ -58,7 +58,8 @@ import { GameNotResolved, 
ReservedGameType, GamePaused, - BadExtraData + BadExtraData, + UnknownChainId } from "src/dispute/lib/Errors.sol"; // Interfaces @@ -172,9 +173,9 @@ contract FaultDisputeGame is Clone, ISemver { uint256 internal constant HEADER_BLOCK_NUMBER_INDEX = 8; /// @notice Semantic version. - /// @custom:semver 1.8.0 + /// @custom:semver 1.9.0 function version() public pure virtual returns (string memory) { - return "1.8.0"; + return "1.9.0"; } /// @notice The starting timestamp of the game @@ -865,6 +866,14 @@ contract FaultDisputeGame is Clone, ISemver { rootClaim_ = Claim.wrap(_getArgBytes32(20)); } + /// @notice Getter for the root claim for a given L2 chain ID. + /// @param _chainId The L2 chain ID to get the root claim for. + /// @return rootClaim_ The root claim of the DisputeGame. + function rootClaimByChainId(uint256 _chainId) public view returns (Claim rootClaim_) { + if (_chainId != L2_CHAIN_ID) revert UnknownChainId(); + rootClaim_ = rootClaim(); + } + /// @notice Getter for the parent hash of the L1 block when the dispute game was created. /// @dev `clones-with-immutable-args` argument #3 /// @return l1Head_ The parent hash of the L1 block when the dispute game was created. diff --git a/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol b/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol index f356190ccdd7e..5dc45e46a3d37 100644 --- a/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol @@ -32,9 +32,9 @@ contract PermissionedDisputeGame is FaultDisputeGame { } /// @notice Semantic version. - /// @custom:semver 1.8.0 + /// @custom:semver 1.9.0 function version() public pure override returns (string memory) { - return "1.8.0"; + return "1.9.0"; } /// @param _params Parameters for creating a new FaultDisputeGame. 
diff --git a/packages/contracts-bedrock/src/dispute/lib/Errors.sol b/packages/contracts-bedrock/src/dispute/lib/Errors.sol index fa23f8d7d86a1..2f5ce0dd777d5 100644 --- a/packages/contracts-bedrock/src/dispute/lib/Errors.sol +++ b/packages/contracts-bedrock/src/dispute/lib/Errors.sol @@ -133,6 +133,9 @@ error GameNotResolved(); /// @notice Thrown when a reserved game type is used. error ReservedGameType(); +/// @notice Thrown when an unknown chain ID is passed to rootClaimByChainId. +error UnknownChainId(); + //////////////////////////////////////////////////////////////// // `PermissionedDisputeGame` Errors // //////////////////////////////////////////////////////////////// @@ -151,7 +154,7 @@ error GamePaused(); error InvalidGameArgsLength(); //////////////////////////////////////////////////////////////// -// `OPSuccinctFaultDisputeGame` Errors // +// `OptimisticZkGame` Errors // //////////////////////////////////////////////////////////////// /// @notice Thrown when the claim has already been challenged. @@ -177,10 +180,3 @@ error InvalidProposalStatus(); /// @notice Thrown when the game is initialized by an incorrect factory. error IncorrectDisputeGameFactory(); - -//////////////////////////////////////////////////////////////// -// `SuperFaultDisputeGame` Errors // -//////////////////////////////////////////////////////////////// - -/// @notice Thrown when an unknown chain ID is encountered. 
-error UnknownChainId(); diff --git a/packages/contracts-bedrock/src/dispute/lib/Types.sol b/packages/contracts-bedrock/src/dispute/lib/Types.sol index 54f795db549c0..eb9438e2b5b50 100644 --- a/packages/contracts-bedrock/src/dispute/lib/Types.sol +++ b/packages/contracts-bedrock/src/dispute/lib/Types.sol @@ -89,6 +89,8 @@ library GameTypes { /// @notice A dispute game type that uses RISC Zero's Kailua GameType internal constant KAILUA = GameType.wrap(1337); + + GameType internal constant OPTIMISTIC_ZK_GAME_TYPE = GameType.wrap(10); } /// @title VMStatuses @@ -127,11 +129,9 @@ library LocalPreimageKey { } //////////////////////////////////////////////////////////////// -// `OPSuccinctFaultDisputeGame` Types // +// `OptimisticZkGame` Types // //////////////////////////////////////////////////////////////// -uint32 constant OP_SUCCINCT_FAULT_DISPUTE_GAME_TYPE = 42; - /// @notice The public values committed to for an OP Succinct aggregation program. struct AggregationOutputs { bytes32 l1Head; diff --git a/packages/contracts-bedrock/src/dispute/v2/FaultDisputeGameV2.sol b/packages/contracts-bedrock/src/dispute/v2/FaultDisputeGameV2.sol index eb373b027c45f..1072a12458c76 100644 --- a/packages/contracts-bedrock/src/dispute/v2/FaultDisputeGameV2.sol +++ b/packages/contracts-bedrock/src/dispute/v2/FaultDisputeGameV2.sol @@ -57,7 +57,8 @@ import { InvalidBondDistributionMode, GameNotResolved, GamePaused, - BadExtraData + BadExtraData, + UnknownChainId } from "src/dispute/lib/Errors.sol"; // Interfaces @@ -146,9 +147,9 @@ contract FaultDisputeGameV2 is Clone, ISemver { uint256 internal constant HEADER_BLOCK_NUMBER_INDEX = 8; /// @notice Semantic version. 
- /// @custom:semver 2.2.0 + /// @custom:semver 2.3.0 function version() public pure virtual returns (string memory) { - return "2.2.0"; + return "2.3.0"; } /// @notice The starting timestamp of the game @@ -845,6 +846,14 @@ contract FaultDisputeGameV2 is Clone, ISemver { rootClaim_ = Claim.wrap(_getArgBytes32(20)); } + /// @notice Getter for the root claim for a given L2 chain ID. + /// @param _chainId The L2 chain ID to get the root claim for. + /// @return rootClaim_ The root claim of the DisputeGame. + function rootClaimByChainId(uint256 _chainId) public pure returns (Claim rootClaim_) { + if (_chainId != l2ChainId()) revert UnknownChainId(); + rootClaim_ = rootClaim(); + } + /// @notice Getter for the parent hash of the L1 block when the dispute game was created. /// @dev `clones-with-immutable-args` argument #3 /// @return l1Head_ The parent hash of the L1 block when the dispute game was created. diff --git a/packages/contracts-bedrock/src/dispute/v2/PermissionedDisputeGameV2.sol b/packages/contracts-bedrock/src/dispute/v2/PermissionedDisputeGameV2.sol index d0a9faa9afe2f..9f2a58ab712b4 100644 --- a/packages/contracts-bedrock/src/dispute/v2/PermissionedDisputeGameV2.sol +++ b/packages/contracts-bedrock/src/dispute/v2/PermissionedDisputeGameV2.sol @@ -26,9 +26,9 @@ contract PermissionedDisputeGameV2 is FaultDisputeGameV2 { } /// @notice Semantic version. - /// @custom:semver 2.2.0 + /// @custom:semver 2.3.0 function version() public pure override returns (string memory) { - return "2.2.0"; + return "2.3.0"; } /// @param _params Parameters for creating a new FaultDisputeGame. 
diff --git a/packages/contracts-bedrock/src/dispute/zk/AccessManager.sol b/packages/contracts-bedrock/src/dispute/zk/AccessManager.sol index fe2d5fdb0adda..b03b034a1a3f4 100644 --- a/packages/contracts-bedrock/src/dispute/zk/AccessManager.sol +++ b/packages/contracts-bedrock/src/dispute/zk/AccessManager.sol @@ -1,15 +1,14 @@ // SPDX-License-Identifier: MIT -pragma solidity ^0.8.15; +pragma solidity 0.8.15; import { Ownable } from "@openzeppelin/contracts/access/Ownable.sol"; import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; -import { GameType } from "src/dispute/lib/Types.sol"; +import { GameType, GameTypes } from "src/dispute/lib/Types.sol"; import { Timestamp } from "src/dispute/lib/LibUDT.sol"; -import { OP_SUCCINCT_FAULT_DISPUTE_GAME_TYPE } from "src/dispute/lib/Types.sol"; /// @title AccessManager /// @notice Manages permissions for dispute game proposers and challengers. -abstract contract AccessManager is Ownable { +contract AccessManager is Ownable { //////////////////////////////////////////////////////////////// // Events // //////////////////////////////////////////////////////////////// @@ -79,7 +78,7 @@ abstract contract AccessManager is Ownable { /// @return The last proposal timestamp. function getLastProposalTimestamp() public view returns (uint256) { // Get the latest game to check its timestamp. - GameType gameType = GameType.wrap(OP_SUCCINCT_FAULT_DISPUTE_GAME_TYPE); + GameType gameType = GameTypes.OPTIMISTIC_ZK_GAME_TYPE; uint256 numGames = DISPUTE_GAME_FACTORY.gameCount(); // Early return if no games exist. 
diff --git a/packages/contracts-bedrock/src/dispute/zk/OPSuccinctFaultDisputeGame.sol b/packages/contracts-bedrock/src/dispute/zk/OptimisticZkGame.sol similarity index 97% rename from packages/contracts-bedrock/src/dispute/zk/OPSuccinctFaultDisputeGame.sol rename to packages/contracts-bedrock/src/dispute/zk/OptimisticZkGame.sol index f7c1e6e763985..a9c91d6f28991 100644 --- a/packages/contracts-bedrock/src/dispute/zk/OPSuccinctFaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/zk/OptimisticZkGame.sol @@ -13,7 +13,7 @@ import { Timestamp, Proposal } from "src/dispute/lib/Types.sol"; -import { AggregationOutputs, OP_SUCCINCT_FAULT_DISPUTE_GAME_TYPE } from "src/dispute/lib/Types.sol"; +import { AggregationOutputs, GameTypes } from "src/dispute/lib/Types.sol"; import { AlreadyInitialized, BadAuth, @@ -43,10 +43,10 @@ import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.so // Contracts import { AccessManager } from "src/dispute/zk/AccessManager.sol"; -/// @title OPSuccinctFaultDisputeGame +/// @title OptimisticZkGame /// @notice An implementation of the `IFaultDisputeGame` interface. /// @dev Derived from https://github.com/succinctlabs/op-succinct (at commit c13844a9bbc330cca69eef2538d8f8ec123e1653) -contract OPSuccinctFaultDisputeGame is Clone, ISemver, IDisputeGame { +contract OptimisticZkGame is Clone, ISemver, IDisputeGame { //////////////////////////////////////////////////////////////// // Enums // //////////////////////////////////////////////////////////////// @@ -146,8 +146,8 @@ contract OPSuccinctFaultDisputeGame is Clone, ISemver, IDisputeGame { AccessManager internal immutable ACCESS_MANAGER; /// @notice Semantic version. - /// @custom:semver 0.0.0 - string public constant version = "0.0.0"; + /// @custom:semver 0.0.2 + string public constant version = "0.0.2"; /// @notice The starting timestamp of the game. 
Timestamp public createdAt; @@ -202,7 +202,7 @@ contract OPSuccinctFaultDisputeGame is Clone, ISemver, IDisputeGame { AccessManager _accessManager ) { // Set up initial game state. - GAME_TYPE = GameType.wrap(OP_SUCCINCT_FAULT_DISPUTE_GAME_TYPE); + GAME_TYPE = GameTypes.OPTIMISTIC_ZK_GAME_TYPE; MAX_CHALLENGE_DURATION = _maxChallengeDuration; MAX_PROVE_DURATION = _maxProveDuration; DISPUTE_GAME_FACTORY = _disputeGameFactory; @@ -279,8 +279,8 @@ contract OPSuccinctFaultDisputeGame is Clone, ISemver, IDisputeGame { } startingProposal = Proposal({ - l2SequenceNumber: OPSuccinctFaultDisputeGame(address(proxy)).l2SequenceNumber(), - root: Hash.wrap(OPSuccinctFaultDisputeGame(address(proxy)).rootClaim().raw()) + l2SequenceNumber: OptimisticZkGame(address(proxy)).l2SequenceNumber(), + root: Hash.wrap(OptimisticZkGame(address(proxy)).rootClaim().raw()) }); // INVARIANT: The parent game must be a valid game. @@ -592,6 +592,13 @@ contract OPSuccinctFaultDisputeGame is Clone, ISemver, IDisputeGame { rootClaim_ = Claim.wrap(_getArgBytes32(0x14)); } + /// @notice Getter for the root claim for a given L2 chain ID. + /// @dev For pre-interop games, returns the root claim regardless of chain ID. + /// @return rootClaim_ The root claim of the DisputeGame. + function rootClaimByChainId(uint256) public pure returns (Claim rootClaim_) { + rootClaim_ = rootClaim(); + } + /// @notice Getter for the parent hash of the L1 block when the dispute game was created. /// @dev `clones-with-immutable-args` argument #3 /// @return l1Head_ The parent hash of the L1 block when the dispute game was created. 
diff --git a/packages/contracts-bedrock/src/governance/MintManager.sol b/packages/contracts-bedrock/src/governance/MintManager.sol index 0f58e391c5121..2a764d1d42f0c 100644 --- a/packages/contracts-bedrock/src/governance/MintManager.sol +++ b/packages/contracts-bedrock/src/governance/MintManager.sol @@ -39,7 +39,8 @@ contract MintManager is Ownable { } /// @notice Only the token owner is allowed to mint a certain amount of the - /// governance token per year. + /// governance token per year. The first mint is uncapped to allow growing + /// the token supply from zero to a non-zero value. /// @param _account The account receiving minted tokens. /// @param _amount The amount of tokens to mint. function mint(address _account, uint256 _amount) public onlyOwner { diff --git a/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol b/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol deleted file mode 100644 index 715bc8d64765e..0000000000000 --- a/packages/contracts-bedrock/src/vendor/asterisc/RISCV.sol +++ /dev/null @@ -1,1707 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.25; - -import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; -import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; - -/// @title RISCV -/// @notice The RISCV contract emulates a single RISCV hart cycle statelessly, using memory proofs to verify the -/// instruction and optional memory access' inclusion in the memory merkle root provided in the trusted -/// prestate witness. -/// @dev https://github.com/ethereum-optimism/asterisc -contract RISCV is IBigStepper { - /// @notice The preimage oracle contract. - IPreimageOracle public oracle; - - /// @notice The version of the contract. - /// @custom:semver 1.2.0-rc.1 - string public constant version = "1.2.0-rc.1"; - - /// @param _oracle The preimage oracle contract. 
- constructor(IPreimageOracle _oracle) { - oracle = _oracle; - } - - /// @inheritdoc IBigStepper - function step(bytes calldata _stateData, bytes calldata _proof, bytes32 _localContext) public returns (bytes32) { - assembly { - function revertWithCode(code) { - mstore(0, code) - revert(0, 0x20) - } - - function preimageOraclePos() -> out { - // slot of preimageOraclePos field - out := 0 - } - - // - // Yul64 - functions to do 64 bit math - see yul64.go - // - function u64Mask() -> out { - // max uint64 - out := shr(192, not(0)) // 256-64 = 192 - } - - function u32Mask() -> out { - out := U64(shr(toU256(224), not(0))) // 256-32 = 224 - } - - function toU64(v) -> out { - out := v - } - - function shortToU64(v) -> out { - out := v - } - - function shortToU256(v) -> out { - out := v - } - - function longToU256(v) -> out { - out := v - } - - function u256ToU64(v) -> out { - out := and(v, U256(u64Mask())) - } - - function u64ToU256(v) -> out { - out := v - } - - function mask32Signed64(v) -> out { - out := signExtend64(and64(v, u32Mask()), toU64(31)) - } - - function u64Mod() -> out { - // 1 << 64 - out := shl(toU256(64), toU256(1)) - } - - function u64TopBit() -> out { - // 1 << 63 - out := shl(toU256(63), toU256(1)) - } - - function signExtend64(v, bit) -> out { - switch and(v, shl(bit, 1)) - case 0 { - // fill with zeroes, by masking - out := U64(and(U256(v), shr(sub(toU256(63), bit), U256(u64Mask())))) - } - default { - // fill with ones, by or-ing - out := U64(or(U256(v), shl(bit, shr(bit, U256(u64Mask()))))) - } - } - - function signExtend64To256(v) -> out { - switch and(U256(v), u64TopBit()) - case 0 { out := v } - default { out := or(shl(toU256(64), not(0)), v) } - } - - function add64(x, y) -> out { - out := U64(mod(add(U256(x), U256(y)), u64Mod())) - } - - function sub64(x, y) -> out { - out := U64(mod(sub(U256(x), U256(y)), u64Mod())) - } - - function mul64(x, y) -> out { - out := u256ToU64(mul(U256(x), U256(y))) - } - - function div64(x, y) -> out { - out := 
u256ToU64(div(U256(x), U256(y))) - } - - function sdiv64(x, y) -> out { - // note: signed overflow semantics are the same between Go and EVM assembly - out := u256ToU64(sdiv(signExtend64To256(x), signExtend64To256(y))) - } - - function mod64(x, y) -> out { - out := U64(mod(U256(x), U256(y))) - } - - function smod64(x, y) -> out { - out := u256ToU64(smod(signExtend64To256(x), signExtend64To256(y))) - } - - function not64(x) -> out { - out := u256ToU64(not(U256(x))) - } - - function lt64(x, y) -> out { - out := U64(lt(U256(x), U256(y))) - } - - function gt64(x, y) -> out { - out := U64(gt(U256(x), U256(y))) - } - - function slt64(x, y) -> out { - out := U64(slt(signExtend64To256(x), signExtend64To256(y))) - } - - function sgt64(x, y) -> out { - out := U64(sgt(signExtend64To256(x), signExtend64To256(y))) - } - - function eq64(x, y) -> out { - out := U64(eq(U256(x), U256(y))) - } - - function iszero64(x) -> out { - out := iszero(U256(x)) - } - - function and64(x, y) -> out { - out := U64(and(U256(x), U256(y))) - } - - function or64(x, y) -> out { - out := U64(or(U256(x), U256(y))) - } - - function xor64(x, y) -> out { - out := U64(xor(U256(x), U256(y))) - } - - function shl64(x, y) -> out { - out := u256ToU64(shl(U256(x), U256(y))) - } - - function shr64(x, y) -> out { - out := U64(shr(U256(x), U256(y))) - } - - function sar64(x, y) -> out { - out := u256ToU64(sar(U256(x), signExtend64To256(y))) - } - - // type casts, no-op in yul - function b32asBEWord(v) -> out { - out := v - } - function beWordAsB32(v) -> out { - out := v - } - function U64(v) -> out { - out := v - } - function U256(v) -> out { - out := v - } - function toU256(v) -> out { - out := v - } - - // - // Bit hacking util - // - function bitlen(x) -> n { - if gt(x, sub(shl(128, 1), 1)) { - x := shr(128, x) - n := add(n, 128) - } - if gt(x, sub(shl(64, 1), 1)) { - x := shr(64, x) - n := add(n, 64) - } - if gt(x, sub(shl(32, 1), 1)) { - x := shr(32, x) - n := add(n, 32) - } - if gt(x, sub(shl(16, 1), 1)) { - 
x := shr(16, x) - n := add(n, 16) - } - if gt(x, sub(shl(8, 1), 1)) { - x := shr(8, x) - n := add(n, 8) - } - if gt(x, sub(shl(4, 1), 1)) { - x := shr(4, x) - n := add(n, 4) - } - if gt(x, sub(shl(2, 1), 1)) { - x := shr(2, x) - n := add(n, 2) - } - if gt(x, sub(shl(1, 1), 1)) { - x := shr(1, x) - n := add(n, 1) - } - if gt(x, 0) { n := add(n, 1) } - } - - function endianSwap(x) -> out { - for { let i := 0 } lt(i, 32) { i := add(i, 1) } { - out := or(shl(8, out), and(x, 0xff)) - x := shr(8, x) - } - } - - // - // State layout - // - function stateSizeMemRoot() -> out { - out := 32 - } - function stateSizePreimageKey() -> out { - out := 32 - } - function stateSizePreimageOffset() -> out { - out := 8 - } - function stateSizePC() -> out { - out := 8 - } - function stateSizeExitCode() -> out { - out := 1 - } - function stateSizeExited() -> out { - out := 1 - } - function stateSizeStep() -> out { - out := 8 - } - function stateSizeHeap() -> out { - out := 8 - } - function stateSizeLoadReservation() -> out { - out := 8 - } - function stateSizeRegisters() -> out { - out := mul(8, 32) - } - - function stateOffsetMemRoot() -> out { - out := 0 - } - function stateOffsetPreimageKey() -> out { - out := 32 // 0 + 32 - // out := add(stateOffsetMemRoot(), stateSizeMemRoot()) - } - function stateOffsetPreimageOffset() -> out { - out := 64 // 32 + 32 - // out := add(stateOffsetPreimageKey(), stateSizePreimageKey()) - } - function stateOffsetPC() -> out { - out := 72 // 64 + 8 - // out := add(stateOffsetPreimageOffset(), stateSizePreimageOffset()) - } - function stateOffsetExitCode() -> out { - out := 80 // 72 + 8 - // out := add(stateOffsetPC(), stateSizePC()) - } - function stateOffsetExited() -> out { - out := 81 // 80 + 1 - // out := add(stateOffsetExitCode(), stateSizeExitCode()) - } - function stateOffsetStep() -> out { - out := 82 // 81 + 1 - // out := add(stateOffsetExited(), stateSizeExited()) - } - function stateOffsetHeap() -> out { - out := 90 // 82 + 8 - // out := 
add(stateOffsetStep(), stateSizeStep()) - } - function stateOffsetLoadReservation() -> out { - out := 98 // 90 + 8 - // out := add(stateOffsetHeap(), stateSizeHeap()) - } - function stateOffsetRegisters() -> out { - out := 106 // 98 + 8 - // out := add(stateOffsetLoadReservation(), stateSizeLoadReservation()) - } - function stateSize() -> out { - out := 362 // 106 + 256 - // out := add(stateOffsetRegisters(), stateSizeRegisters()) - } - - // - // Initial EVM memory / calldata checks - // - if iszero(eq(mload(0x40), 0x80)) { - // expected memory check: no allocated memory (start after scratch + free-mem-ptr + zero slot = 0x80) - revert(0, 0) - } - if iszero(eq(_stateData.offset, 132)) { - // 32*4+4 = 132 expected state data offset - revert(0, 0) - } - if iszero(eq(calldataload(sub(_stateData.offset, 32)), stateSize())) { - // user-provided state size must match expected state size - revert(0, 0) - } - function paddedLen(v) -> out { - // padded to multiple of 32 bytes - let padding := mod(sub(32, mod(v, 32)), 32) - out := add(v, padding) - } - if iszero(eq(_proof.offset, add(add(_stateData.offset, paddedLen(stateSize())), 32))) { - // 132+stateSize+padding+32 = expected proof offset - revert(0, 0) - } - function proofContentOffset() -> out { - // since we can't reference proof.offset in functions, blame Yul - // 132+362+(32-362%32)+32=548 - out := 548 - } - if iszero(eq(_proof.offset, proofContentOffset())) { revert(0, 0) } - - if mod(calldataload(sub(proofContentOffset(), 32)), mul(60, 32)) { - // proof offset must be stateContentOffset+paddedStateSize+32 - // proof size: 64-5+1=60 * 32 byte leaf, - // so the proofSize must be a multiple of 60*32 - revert(0, 0) - } - - // - // State loading - // - function memStateOffset() -> out { - out := 0x80 - } - // copy the state calldata into memory, so we can mutate it - mstore(0x40, add(memStateOffset(), stateSize())) // alloc, update free mem pointer - calldatacopy(memStateOffset(), _stateData.offset, stateSize()) // same 
format in memory as in calldata - - // - // State access - // - function readState(offset, length) -> out { - out := mload(add(memStateOffset(), offset)) // note: the state variables are all big-endian encoded - out := shr(shl(3, sub(32, length)), out) // shift-right to right-align data and reduce to desired length - } - function writeState(offset, length, data) { - let memOffset := add(memStateOffset(), offset) - // left-aligned mask of length bytes - let mask := shl(shl(3, sub(32, length)), not(0)) - let prev := mload(memOffset) - // align data to left - data := shl(shl(3, sub(32, length)), data) - // mask out data from previous word, and apply new data - let result := or(and(prev, not(mask)), data) - mstore(memOffset, result) - } - - function getMemRoot() -> out { - out := readState(stateOffsetMemRoot(), stateSizeMemRoot()) - } - function setMemRoot(v) { - writeState(stateOffsetMemRoot(), stateSizeMemRoot(), v) - } - - function getPreimageKey() -> out { - out := readState(stateOffsetPreimageKey(), stateSizePreimageKey()) - } - function setPreimageKey(k) { - writeState(stateOffsetPreimageKey(), stateSizePreimageKey(), k) - } - - function getPreimageOffset() -> out { - out := readState(stateOffsetPreimageOffset(), stateSizePreimageOffset()) - } - function setPreimageOffset(v) { - writeState(stateOffsetPreimageOffset(), stateSizePreimageOffset(), v) - } - - function getPC() -> out { - out := readState(stateOffsetPC(), stateSizePC()) - } - function setPC(v) { - writeState(stateOffsetPC(), stateSizePC(), v) - } - - function getExited() -> out { - out := readState(stateOffsetExited(), stateSizeExited()) - } - function setExited() { - writeState(stateOffsetExited(), stateSizeExited(), 1) - } - - function getExitCode() -> out { - out := readState(stateOffsetExitCode(), stateSizeExitCode()) - } - function setExitCode(v) { - writeState(stateOffsetExitCode(), stateSizeExitCode(), v) - } - - function getStep() -> out { - out := readState(stateOffsetStep(), stateSizeStep()) 
- } - function setStep(v) { - writeState(stateOffsetStep(), stateSizeStep(), v) - } - - function getHeap() -> out { - out := readState(stateOffsetHeap(), stateSizeHeap()) - } - function setHeap(v) { - writeState(stateOffsetHeap(), stateSizeHeap(), v) - } - - function getLoadReservation() -> out { - out := readState(stateOffsetLoadReservation(), stateSizeLoadReservation()) - } - function setLoadReservation(addr) { - writeState(stateOffsetLoadReservation(), stateSizeLoadReservation(), addr) - } - - function getRegister(reg) -> out { - if gt64(reg, toU64(31)) { revertWithCode(0xbad4e9) } // cannot load invalid register - - let offset := add64(toU64(stateOffsetRegisters()), mul64(reg, toU64(8))) - out := readState(offset, 8) - } - function setRegister(reg, v) { - if iszero64(reg) { - // reg 0 must stay 0 - // v is a HINT, but no hints are specified by standard spec, or used by us. - leave - } - if gt64(reg, toU64(31)) { revertWithCode(0xbad4e9) } // unknown register - - let offset := add64(toU64(stateOffsetRegisters()), mul64(reg, toU64(8))) - writeState(offset, 8, v) - } - - // - // State output - // - function vmStatus() -> status { - switch getExited() - case 1 { - switch getExitCode() - case 0 { status := 0 } - // VMStatusValid - case 1 { status := 1 } - // VMStatusInvalid - default { status := 2 } // VMStatusPanic - } - default { status := 3 } // VMStatusUnfinished - } - - function computeStateHash() -> out { - // Log the RISC-V state for debugging - log0(memStateOffset(), stateSize()) - - out := keccak256(memStateOffset(), stateSize()) - out := or(and(not(shl(248, 0xFF)), out), shl(248, vmStatus())) - } - - // - // Parse - functions to parse RISC-V instructions - see parse.go - // - function parseImmTypeI(instr) -> out { - out := signExtend64(shr64(toU64(20), instr), toU64(11)) - } - - function parseImmTypeS(instr) -> out { - out := - signExtend64( - or64(shl64(toU64(5), shr64(toU64(25), instr)), and64(shr64(toU64(7), instr), toU64(0x1F))), - toU64(11) - ) - } - 
- function parseImmTypeB(instr) -> out { - out := - signExtend64( - or64( - or64( - shl64(toU64(1), and64(shr64(toU64(8), instr), toU64(0xF))), - shl64(toU64(5), and64(shr64(toU64(25), instr), toU64(0x3F))) - ), - or64( - shl64(toU64(11), and64(shr64(toU64(7), instr), toU64(1))), - shl64(toU64(12), shr64(toU64(31), instr)) - ) - ), - toU64(12) - ) - } - - function parseImmTypeU(instr) -> out { - out := signExtend64(shr64(toU64(12), instr), toU64(19)) - } - - function parseImmTypeJ(instr) -> out { - out := - signExtend64( - or64( - or64( - and64(shr64(toU64(21), instr), shortToU64(0x3FF)), // 10 bits for index 0:9 - shl64(toU64(10), and64(shr64(toU64(20), instr), toU64(1))) // 1 bit for index 10 - ), - or64( - shl64(toU64(11), and64(shr64(toU64(12), instr), toU64(0xFF))), // 8 bits for index 11:18 - shl64(toU64(19), shr64(toU64(31), instr)) // 1 bit for index 19 - ) - ), - toU64(19) - ) - } - - function parseOpcode(instr) -> out { - out := and64(instr, toU64(0x7F)) - } - - function parseRd(instr) -> out { - out := and64(shr64(toU64(7), instr), toU64(0x1F)) - } - - function parseFunct3(instr) -> out { - out := and64(shr64(toU64(12), instr), toU64(0x7)) - } - - function parseRs1(instr) -> out { - out := and64(shr64(toU64(15), instr), toU64(0x1F)) - } - - function parseRs2(instr) -> out { - out := and64(shr64(toU64(20), instr), toU64(0x1F)) - } - - function parseFunct7(instr) -> out { - out := shr64(toU64(25), instr) - } - - // - // Memory functions - // - function proofOffset(proofIndex) -> offset { - // proof size: 64-5+1=60 (a 64-bit mem-address branch to 32 byte leaf, incl leaf itself), all 32 bytes - offset := mul64(mul64(toU64(proofIndex), toU64(60)), toU64(32)) - offset := add64(offset, proofContentOffset()) - } - - function hashPair(a, b) -> h { - mstore(0, a) - mstore(0x20, b) - h := keccak256(0, 0x40) - } - - function getMemoryB32(addr, proofIndex) -> out { - if and64(addr, toU64(31)) { - // quick addr alignment check - revertWithCode(0xbad10ad0) // addr not 
aligned with 32 bytes - } - let offset := proofOffset(proofIndex) - let leaf := calldataload(offset) - offset := add64(offset, toU64(32)) - - let path := shr64(toU64(5), addr) // 32 bytes of memory per leaf - let node := leaf // starting from the leaf node, work back up by combining with siblings, to reconstruct - // the root - for { let i := 0 } lt(i, sub(64, 5)) { i := add(i, 1) } { - let sibling := calldataload(offset) - offset := add64(offset, toU64(32)) - switch and64(shr64(toU64(i), path), toU64(1)) - case 0 { node := hashPair(node, sibling) } - case 1 { node := hashPair(sibling, node) } - } - let memRoot := getMemRoot() - if iszero(eq(b32asBEWord(node), b32asBEWord(memRoot))) { - // verify the root matches - revertWithCode(0xbadf00d1) // bad memory proof - } - out := leaf - } - - // warning: setMemoryB32 does not verify the proof, - // it assumes the same memory proof has been verified with getMemoryB32 - function setMemoryB32(addr, v, proofIndex) { - if and64(addr, toU64(31)) { revertWithCode(0xbad10ad0) } // addr not aligned with 32 bytes - - let offset := proofOffset(proofIndex) - let leaf := v - offset := add64(offset, toU64(32)) - let path := shr64(toU64(5), addr) // 32 bytes of memory per leaf - let node := leaf // starting from the leaf node, work back up by combining with siblings, to reconstruct - // the root - for { let i := 0 } lt(i, sub(64, 5)) { i := add(i, 1) } { - let sibling := calldataload(offset) - offset := add64(offset, toU64(32)) - - switch and64(shr64(toU64(i), path), toU64(1)) - case 0 { node := hashPair(node, sibling) } - case 1 { node := hashPair(sibling, node) } - } - setMemRoot(node) // store new memRoot - } - - // load unaligned, optionally signed, little-endian, integer of 1 ... 
8 bytes from memory - function loadMem(addr, size, signed, proofIndexL, proofIndexR) -> out { - if gt(size, 8) { revertWithCode(0xbad512e0) } // cannot load more than 8 bytes - // load/verify left part - let leftAddr := and64(addr, not64(toU64(31))) - let left := b32asBEWord(getMemoryB32(leftAddr, proofIndexL)) - let alignment := sub64(addr, leftAddr) - - let right := 0 - let rightAddr := and64(add64(addr, sub64(size, toU64(1))), not64(toU64(31))) - let leftShamt := sub64(sub64(toU64(32), alignment), size) - let rightShamt := toU64(0) - if iszero64(eq64(leftAddr, rightAddr)) { - // if unaligned, use second proof for the right part - if eq(proofIndexR, 0xff) { revertWithCode(0xbad22220) } // unexpected need for right-side proof in - // loadMem - // load/verify right part - right := b32asBEWord(getMemoryB32(rightAddr, proofIndexR)) - // left content is aligned to right of 32 bytes - leftShamt := toU64(0) - rightShamt := sub64(sub64(toU64(64), alignment), size) - } - - let addr_ := addr - let size_ := size - // left: prepare for byte-taking by right-aligning - left := shr(u64ToU256(shl64(toU64(3), leftShamt)), left) - // right: right-align for byte-taking by right-aligning - right := shr(u64ToU256(shl64(toU64(3), rightShamt)), right) - // loop: - for { let i := 0 } lt(i, size_) { i := add(i, 1) } { - // translate to reverse byte lookup, since we are reading little-endian memory, and need the highest - // byte first. 
- // effAddr := (addr + size - 1 - i) &^ 31 - let effAddr := and64(sub64(sub64(add64(addr_, size_), toU64(1)), toU64(i)), not64(toU64(31))) - // take a byte from either left or right, depending on the effective address - let b := toU256(0) - switch eq64(effAddr, leftAddr) - case 1 { - b := and(left, toU256(0xff)) - left := shr(toU256(8), left) - } - case 0 { - b := and(right, toU256(0xff)) - right := shr(toU256(8), right) - } - // append it to the output - out := or64(shl64(toU64(8), out), u256ToU64(b)) - } - - if signed { - let signBitShift := sub64(shl64(toU64(3), size_), toU64(1)) - out := signExtend64(out, signBitShift) - } - } - - // Splits the value into a left and a right part, each with a mask (identify data) and a patch (diff - // content). - function leftAndRight(alignment, size, value) -> leftMask, rightMask, leftPatch, rightPatch { - let start := alignment - let end := add64(alignment, size) - for { let i := 0 } lt(i, 64) { i := add(i, 1) } { - let index := toU64(i) - let leftSide := lt64(index, toU64(32)) - switch leftSide - case 1 { - leftPatch := shl(8, leftPatch) - leftMask := shl(8, leftMask) - } - case 0 { - rightPatch := shl(8, rightPatch) - rightMask := shl(8, rightMask) - } - if and64(eq64(lt64(index, start), toU64(0)), lt64(index, end)) { - // if alignment <= i < alignment+size - let b := and(shr(u64ToU256(shl64(toU64(3), sub64(index, alignment))), value), toU256(0xff)) - switch leftSide - case 1 { - leftPatch := or(leftPatch, b) - leftMask := or(leftMask, toU256(0xff)) - } - case 0 { - rightPatch := or(rightPatch, b) - rightMask := or(rightMask, toU256(0xff)) - } - } - } - } - - function storeMemUnaligned(addr, size, value, proofIndexL, proofIndexR) { - if gt(size, 32) { revertWithCode(0xbad512e1) } // cannot store more than 32 bytes - - let leftAddr := and64(addr, not64(toU64(31))) - let rightAddr := and64(add64(addr, sub64(size, toU64(1))), not64(toU64(31))) - let alignment := sub64(addr, leftAddr) - let leftMask, rightMask, leftPatch, 
rightPatch := leftAndRight(alignment, size, value) - - // load the left base - let left := b32asBEWord(getMemoryB32(leftAddr, proofIndexL)) - // apply the left patch - left := or(and(left, not(leftMask)), leftPatch) - // write the left - setMemoryB32(leftAddr, beWordAsB32(left), proofIndexL) - - // if aligned: nothing more to do here - if eq64(leftAddr, rightAddr) { leave } - if eq(proofIndexR, 0xff) { revertWithCode(0xbad22221) } // unexpected need for right-side proof in - // storeMem - // load the right base (with updated mem root) - let right := b32asBEWord(getMemoryB32(rightAddr, proofIndexR)) - // apply the right patch - right := or(and(right, not(rightMask)), rightPatch) - // write the right (with updated mem root) - setMemoryB32(rightAddr, beWordAsB32(right), proofIndexR) - } - - function storeMem(addr, size, value, proofIndexL, proofIndexR) { - if gt(size, 8) { revertWithCode(0xbad512e8) } // cannot store more than 8 bytes - - storeMemUnaligned(addr, size, u64ToU256(value), proofIndexL, proofIndexR) - } - - // - // Preimage oracle interactions - // - function writePreimageKey(addr, count) -> out { - // adjust count down, so we only have to read a single 32 byte leaf of memory - let alignment := and64(addr, toU64(31)) - let maxData := sub64(toU64(32), alignment) - if gt64(count, maxData) { count := maxData } - - let dat := b32asBEWord(getMemoryB32(sub64(addr, alignment), 1)) - // shift out leading bits - dat := shl(u64ToU256(shl64(toU64(3), alignment)), dat) - // shift to right end, remove trailing bits - dat := shr(u64ToU256(shl64(toU64(3), sub64(toU64(32), count))), dat) - - let bits := shl(toU256(3), u64ToU256(count)) - - let preImageKey := getPreimageKey() - - // Append to key content by bit-shifting - let key := b32asBEWord(preImageKey) - key := shl(bits, key) - key := or(key, dat) - - // We reset the pre-image value offset back to 0 (the right part of the merkle pair) - setPreimageKey(beWordAsB32(key)) - setPreimageOffset(toU64(0)) - out := count - } 
- - function readPreimagePart(key, offset) -> dat, datlen { - let addr := sload(preimageOraclePos()) // calling Oracle.readPreimage(bytes32,uint256) - let memPtr := mload(0x40) // get pointer to free memory for preimage interactions - mstore(memPtr, shl(224, 0xe03110e1)) // (32-4)*8=224: right-pad the function selector, and then store it - // as prefix - mstore(add(memPtr, 0x04), key) - mstore(add(memPtr, 0x24), offset) - let res := call(gas(), addr, 0, memPtr, 0x44, 0x00, 0x40) // output into scratch space - if res { - // 1 on success - dat := mload(0x00) - datlen := mload(0x20) - leave - } - revertWithCode(0xbadf00d0) - } - - // Original implementation is at src/cannon/PreimageKeyLib.sol - // but it cannot be used because this is inside assembly block - function localize(preImageKey, localContext_) -> localizedKey { - // Grab the current free memory pointer to restore later. - let ptr := mload(0x40) - // Store the local data key and caller next to each other in memory for hashing. - mstore(0, preImageKey) - mstore(0x20, caller()) - mstore(0x40, localContext_) - // Localize the key with the above `localize` operation. - localizedKey := or(and(keccak256(0, 0x60), not(shl(248, 0xFF))), shl(248, 1)) - // Restore the free memory pointer. - mstore(0x40, ptr) - } - - function readPreimageValue(addr, count, localContext_) -> out { - let preImageKey := getPreimageKey() - let offset := getPreimageOffset() - // If the preimage key is a local key, localize it in the context of the caller. 
- let preImageKeyPrefix := shr(248, preImageKey) // 256-8=248 - if eq(preImageKeyPrefix, 1) { preImageKey := localize(preImageKey, localContext_) } - // make call to pre-image oracle contract - let pdatB32, pdatlen := readPreimagePart(preImageKey, offset) - if iszero64(pdatlen) { - // EOF - out := toU64(0) - leave - } - let alignment := and64(addr, toU64(31)) // how many bytes addr is offset from being left-aligned - let maxData := sub64(toU64(32), alignment) // higher alignment leaves less room for data this step - if gt64(count, maxData) { count := maxData } - if gt64(count, pdatlen) { - // cannot read more than pdatlen - count := pdatlen - } - - let addr_ := addr - let count_ := count - let bits := shl64(toU64(3), sub64(toU64(32), count_)) // 32-count, in bits - let mask := not(sub(shl(u64ToU256(bits), toU256(1)), toU256(1))) // left-aligned mask for count bytes - let alignmentBits := u64ToU256(shl64(toU64(3), alignment)) - mask := shr(alignmentBits, mask) // mask of count bytes, shifted by alignment - let pdat := shr(alignmentBits, b32asBEWord(pdatB32)) // pdat, shifted by alignment - - // update pre-image reader with updated offset - let newOffset := add64(offset, count_) - setPreimageOffset(newOffset) - - out := count_ - - let node := getMemoryB32(sub64(addr_, alignment), 1) - let dat := and(b32asBEWord(node), not(mask)) // keep old bytes outside of mask - dat := or(dat, and(pdat, mask)) // fill with bytes from pdat - setMemoryB32(sub64(addr_, alignment), beWordAsB32(dat), 1) - } - - // - // Syscall handling - // - function sysCall(localContext_) { - let a7 := getRegister(toU64(17)) - switch a7 - case 93 { - // exit the calling thread. No multi-thread support yet, so just exit. - let a0 := getRegister(toU64(10)) - setExitCode(and(a0, 0xff)) - setExited() - // program stops here, no need to change registers. 
- } - case 94 { - // exit-group - let a0 := getRegister(toU64(10)) - setExitCode(and(a0, 0xff)) - setExited() - } - case 214 { - // brk - // Go sys_linux_riscv64 runtime will only ever call brk(NULL), i.e. first argument (register a0) set - // to 0. - - // brk(0) changes nothing about the memory, and returns the current page break - let v := shl64(toU64(30), toU64(1)) // set program break at 1 GiB - setRegister(toU64(10), v) - setRegister(toU64(11), toU64(0)) // no error - } - case 222 { - // mmap - // A0 = addr (hint) - let addr := getRegister(toU64(10)) - // A1 = n (length) - let length := getRegister(toU64(11)) - // A2 = prot (memory protection type, can ignore) - // A3 = flags (shared with other process and or written back to file) - let flags := getRegister(toU64(13)) - // A4 = fd (file descriptor, can ignore because we support anon memory only) - let fd := getRegister(toU64(14)) - // A5 = offset (offset in file, we don't support any non-anon memory, so we can ignore this) - - let errCode := 0 - // ensure MAP_ANONYMOUS is set and fd == -1 - switch or(iszero(and(flags, 0x20)), iszero(eq(fd, u64Mask()))) - case 1 { - addr := u64Mask() - errCode := toU64(0x4d) - } - default { - switch addr - case 0 { - // No hint, allocate it ourselves, by as much as the requested length. - // Increase the length to align it with desired page size if necessary. 
- let align := and64(length, shortToU64(4095)) - if align { length := add64(length, sub64(shortToU64(4096), align)) } - let prevHeap := getHeap() - addr := prevHeap - setHeap(add64(prevHeap, length)) // increment heap with length - } - default { - // allow hinted memory address (leave it in A0 as return argument) - } - } - - setRegister(toU64(10), addr) - setRegister(toU64(11), errCode) - } - case 63 { - // read - let fd := getRegister(toU64(10)) // A0 = fd - let addr := getRegister(toU64(11)) // A1 = *buf addr - let count := getRegister(toU64(12)) // A2 = count - let n := 0 - let errCode := 0 - switch fd - case 0 { - // stdin - n := toU64(0) // never read anything from stdin - errCode := toU64(0) - } - case 3 { - // hint-read - // say we read it all, to continue execution after reading the hint-write ack response - n := count - errCode := toU64(0) - } - case 5 { - // preimage read - n := readPreimageValue(addr, count, localContext_) - errCode := toU64(0) - } - default { - n := u64Mask() // -1 (reading error) - errCode := toU64(0x4d) // EBADF - } - setRegister(toU64(10), n) - setRegister(toU64(11), errCode) - } - case 64 { - // write - let fd := getRegister(toU64(10)) // A0 = fd - let addr := getRegister(toU64(11)) // A1 = *buf addr - let count := getRegister(toU64(12)) // A2 = count - let n := 0 - let errCode := 0 - switch fd - case 1 { - // stdout - n := count // write completes fully in single instruction step - errCode := toU64(0) - } - case 2 { - // stderr - n := count // write completes fully in single instruction step - errCode := toU64(0) - } - case 4 { - // hint-write - n := count - errCode := toU64(0) - } - case 6 { - // pre-image key-write - n := writePreimageKey(addr, count) - errCode := toU64(0) // no error - } - default { - // any other file, including (3) hint read (5) preimage read - n := u64Mask() // -1 (writing error) - errCode := toU64(0x4d) // EBADF - } - setRegister(toU64(10), n) - setRegister(toU64(11), errCode) - } - case 25 { - // fcntl - 
file descriptor manipulation / info lookup - let fd := getRegister(toU64(10)) // A0 = fd - let cmd := getRegister(toU64(11)) // A1 = cmd - let out := 0 - let errCode := 0 - switch cmd - case 0x1 { - // F_GETFD: get file descriptor flags - switch fd - case 0 { - // stdin - out := toU64(0) // no flag set - } - case 1 { - // stdout - out := toU64(0) // no flag set - } - case 2 { - // stderr - out := toU64(0) // no flag set - } - case 3 { - // hint-read - out := toU64(0) // no flag set - } - case 4 { - // hint-write - out := toU64(0) // no flag set - } - case 5 { - // pre-image read - out := toU64(0) // no flag set - } - case 6 { - // pre-image write - out := toU64(0) // no flag set - } - default { - out := u64Mask() - errCode := toU64(0x4d) //EBADF - } - } - case 0x3 { - // F_GETFL: get file descriptor flags - switch fd - case 0 { - // stdin - out := toU64(0) // O_RDONLY - } - case 1 { - // stdout - out := toU64(1) // O_WRONLY - } - case 2 { - // stderr - out := toU64(1) // O_WRONLY - } - case 3 { - // hint-read - out := toU64(0) // O_RDONLY - } - case 4 { - // hint-write - out := toU64(1) // O_WRONLY - } - case 5 { - // pre-image read - out := toU64(0) // O_RDONLY - } - case 6 { - // pre-image write - out := toU64(1) // O_WRONLY - } - default { - out := u64Mask() - errCode := toU64(0x4d) // EBADF - } - } - default { - // no other commands: don't allow changing flags, duplicating FDs, etc. 
- out := u64Mask() - errCode := toU64(0x16) // EINVAL (cmd not recognized by this kernel) - } - setRegister(toU64(10), out) - setRegister(toU64(11), errCode) // EBADF - } - case 56 { - // openat - the Go linux runtime will try to open optional /sys/kernel files for performance hints - setRegister(toU64(10), u64Mask()) - setRegister(toU64(11), toU64(0xd)) // EACCES - no access allowed - } - case 113 { - // clock_gettime - let addr := getRegister(toU64(11)) // addr of timespec struct - // write 1337s + 42ns as time - let value := or(shortToU256(1337), shl(shortToU256(64), toU256(42))) - storeMemUnaligned(addr, toU64(16), value, 1, 2) - setRegister(toU64(10), toU64(0)) - setRegister(toU64(11), toU64(0)) - } - case 220 { - // clone - not supported - setRegister(toU64(10), toU64(1)) - setRegister(toU64(11), toU64(0)) - } - case 163 { - // getrlimit - let res := getRegister(toU64(10)) - let addr := getRegister(toU64(11)) - switch res - case 0x7 { - // RLIMIT_NOFILE - // first 8 bytes: soft limit. 1024 file handles max open - // second 8 bytes: hard limit - storeMemUnaligned( - addr, toU64(16), or(shortToU256(1024), shl(toU256(64), shortToU256(1024))), 1, 2 - ) - setRegister(toU64(10), toU64(0)) - setRegister(toU64(11), toU64(0)) - } - default { revertWithCode(0xf0012) } // unrecognized resource limit lookup - } - case 261 { - // prlimit64 -- unsupported, we have getrlimit, is prlimit64 even called? 
- revertWithCode(0xf001ca11) // unsupported system call - } - case 422 { - // futex - not supported, for now - revertWithCode(0xf001ca11) // unsupported system call - } - case 101 { - // nanosleep - not supported, for now - revertWithCode(0xf001ca11) // unsupported system call - } - default { - // Ignore(no-op) unsupported system calls - setRegister(toU64(10), toU64(0)) - setRegister(toU64(11), toU64(0)) - } - } - - // - // Instruction execution - // - if getExited() { - // early exit if we can - mstore(0, computeStateHash()) - return(0, 0x20) - } - setStep(add64(getStep(), toU64(1))) - - let _pc := getPC() - let instr := loadMem(_pc, toU64(4), false, 0, 0xff) // raw instruction - - // these fields are ignored if not applicable to the instruction type / opcode - let opcode := parseOpcode(instr) - let rd := parseRd(instr) // destination register index - let funct3 := parseFunct3(instr) - let rs1 := parseRs1(instr) // source register 1 index - let rs2 := parseRs2(instr) // source register 2 index - let funct7 := parseFunct7(instr) - - switch opcode - case 0x03 { - let pc_ := _pc - // 000_0011: memory loading - // LB, LH, LW, LD, LBU, LHU, LWU - - // bits[14:12] set to 111 are reserved - if eq64(funct3, toU64(0x7)) { revertWithCode(0xbadc0de) } - - let imm := parseImmTypeI(instr) - let signed := iszero64(and64(funct3, toU64(4))) // 4 = 100 -> bitflag - let size := shl64(and64(funct3, toU64(3)), toU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size - let rs1Value := getRegister(rs1) - let memIndex := add64(rs1Value, signExtend64(imm, toU64(11))) - let rdValue := loadMem(memIndex, size, signed, 1, 2) - setRegister(rd, rdValue) - setPC(add64(pc_, toU64(4))) - } - case 0x23 { - let pc_ := _pc - // 010_0011: memory storing - // SB, SH, SW, SD - let imm := parseImmTypeS(instr) - let size := shl64(funct3, toU64(1)) - let value := getRegister(rs2) - let rs1Value := getRegister(rs1) - let memIndex := add64(rs1Value, signExtend64(imm, toU64(11))) - storeMem(memIndex, size, value, 1, 2) 
- setPC(add64(pc_, toU64(4))) - } - case 0x63 { - // 110_0011: branching - let rs1Value := getRegister(rs1) - let rs2Value := getRegister(rs2) - let branchHit := toU64(0) - switch funct3 - case 0 { - // 000 = BEQ - branchHit := eq64(rs1Value, rs2Value) - } - case 1 { - // 001 = BNE - branchHit := and64(not64(eq64(rs1Value, rs2Value)), toU64(1)) - } - case 4 { - // 100 = BLT - branchHit := slt64(rs1Value, rs2Value) - } - case 5 { - // 101 = BGE - branchHit := and64(not64(slt64(rs1Value, rs2Value)), toU64(1)) - } - case 6 { - // 110 = BLTU - branchHit := lt64(rs1Value, rs2Value) - } - case 7 { - // 111 := BGEU - branchHit := and64(not64(lt64(rs1Value, rs2Value)), toU64(1)) - } - switch branchHit - case 0 { _pc := add64(_pc, toU64(4)) } - default { - let imm := parseImmTypeB(instr) - // imm12 is a signed offset, in multiples of 2 bytes. - // So it's really 13 bits with a hardcoded 0 bit. - _pc := add64(_pc, imm) - } - - // The PC must be aligned to 4 bytes. - if and64(_pc, toU64(3)) { revertWithCode(0xbad10ad0) } // target not aligned with 4 bytes - - // not like the other opcodes: nothing to write to rd register, and PC has already changed - setPC(_pc) - } - case 0x13 { - // 001_0011: immediate arithmetic and logic - let rs1Value := getRegister(rs1) - let imm := parseImmTypeI(instr) - let rdValue := 0 - switch funct3 - case 0 { - // 000 = ADDI - rdValue := add64(rs1Value, imm) - } - case 1 { - // 001 = SLLI - rdValue := shl64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode - } - case 2 { - // 010 = SLTI - rdValue := slt64(rs1Value, imm) - } - case 3 { - // 011 = SLTIU - rdValue := lt64(rs1Value, imm) - } - case 4 { - // 100 = XORI - rdValue := xor64(rs1Value, imm) - } - case 5 { - // 101 = SR~ - switch shr64(toU64(6), imm) - // in rv64i the top 6 bits select the shift type - case 0x00 { - // 000000 = SRLI - rdValue := shr64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode - } - case 0x10 { - // 010000 = SRAI - rdValue := 
sar64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode - } - default { revertWithCode(0xbadc0de) } - } - case 6 { - // 110 = ORI - rdValue := or64(rs1Value, imm) - } - case 7 { - // 111 = ANDI - rdValue := and64(rs1Value, imm) - } - default { revertWithCode(0xbadc0de) } - setRegister(rd, rdValue) - setPC(add64(_pc, toU64(4))) - } - case 0x1B { - // 001_1011: immediate arithmetic and logic signed 32 bit - let rs1Value := getRegister(rs1) - let imm := parseImmTypeI(instr) - let rdValue := 0 - switch funct3 - case 0 { - // 000 = ADDIW - rdValue := mask32Signed64(add64(rs1Value, imm)) - } - case 1 { - // 001 = SLLIW - - // SLLIW where imm[5] != 0 is reserved - if and64(imm, toU64(0x20)) { revertWithCode(0xbadc0de) } - rdValue := mask32Signed64(shl64(and64(imm, toU64(0x1F)), rs1Value)) - } - case 5 { - // SRLIW and SRAIW where imm[5] != 0 is reserved - if and64(imm, toU64(0x20)) { revertWithCode(0xbadc0de) } - - // 101 = SR~ - let shamt := and64(imm, toU64(0x1F)) - switch shr64(toU64(5), imm) - // top 7 bits select the shift type - case 0x00 { - // 0000000 = SRLIW - rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), toU64(31)) - } - case 0x20 { - // 0100000 = SRAIW - rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(toU64(31), shamt)) - } - default { revertWithCode(0xbadc0de) } - } - default { revertWithCode(0xbadc0de) } - setRegister(rd, rdValue) - setPC(add64(_pc, toU64(4))) - } - case 0x33 { - // 011_0011: register arithmetic and logic - let rs1Value := getRegister(rs1) - let rs2Value := getRegister(rs2) - let rdValue := 0 - switch funct7 - case 1 { - // RV M extension - switch funct3 - case 0 { - // 000 = MUL: signed x signed - rdValue := mul64(rs1Value, rs2Value) - } - case 1 { - // 001 = MULH: upper bits of signed x signed - rdValue := - u256ToU64(shr(toU256(64), mul(signExtend64To256(rs1Value), signExtend64To256(rs2Value)))) - } - case 2 { - // 010 = MULHSU: upper bits of signed x unsigned - rdValue := 
u256ToU64(shr(toU256(64), mul(signExtend64To256(rs1Value), u64ToU256(rs2Value)))) - } - case 3 { - // 011 = MULHU: upper bits of unsigned x unsigned - rdValue := u256ToU64(shr(toU256(64), mul(u64ToU256(rs1Value), u64ToU256(rs2Value)))) - } - case 4 { - // 100 = DIV - switch rs2Value - case 0 { rdValue := u64Mask() } - default { rdValue := sdiv64(rs1Value, rs2Value) } - } - case 5 { - // 101 = DIVU - switch rs2Value - case 0 { rdValue := u64Mask() } - default { rdValue := div64(rs1Value, rs2Value) } - } - case 6 { - // 110 = REM - switch rs2Value - case 0 { rdValue := rs1Value } - default { rdValue := smod64(rs1Value, rs2Value) } - } - case 7 { - // 111 = REMU - switch rs2Value - case 0 { rdValue := rs1Value } - default { rdValue := mod64(rs1Value, rs2Value) } - } - default { revertWithCode(0xbadc0de) } - } - default { - switch funct3 - case 0 { - // 000 = ADD/SUB - switch funct7 - case 0x00 { - // 0000000 = ADD - rdValue := add64(rs1Value, rs2Value) - } - case 0x20 { - // 0100000 = SUB - rdValue := sub64(rs1Value, rs2Value) - } - default { revertWithCode(0xbadc0de) } - } - case 1 { - // 001 = SLL - rdValue := shl64(and64(rs2Value, toU64(0x3F)), rs1Value) // only the low 6 bits are consider in - // RV6VI - } - case 2 { - // 010 = SLT - rdValue := slt64(rs1Value, rs2Value) - } - case 3 { - // 011 = SLTU - rdValue := lt64(rs1Value, rs2Value) - } - case 4 { - // 100 = XOR - rdValue := xor64(rs1Value, rs2Value) - } - case 5 { - // 101 = SR~ - switch funct7 - case 0x00 { - // 0000000 = SRL - rdValue := shr64(and64(rs2Value, toU64(0x3F)), rs1Value) // logical: fill with zeroes - } - case 0x20 { - // 0100000 = SRA - rdValue := sar64(and64(rs2Value, toU64(0x3F)), rs1Value) // arithmetic: sign bit is extended - } - default { revertWithCode(0xbadc0de) } - } - case 6 { - // 110 = OR - rdValue := or64(rs1Value, rs2Value) - } - case 7 { - // 111 = AND - rdValue := and64(rs1Value, rs2Value) - } - default { revertWithCode(0xbadc0de) } - } - setRegister(rd, rdValue) - 
setPC(add64(_pc, toU64(4))) - } - case 0x3B { - // 011_1011: register arithmetic and logic in 32 bits - let rs1Value := getRegister(rs1) - let rs2Value := and64(getRegister(rs2), u32Mask()) - let rdValue := 0 - switch funct7 - case 1 { - // RV M extension - switch funct3 - case 0 { - // 000 = MULW - rdValue := mask32Signed64(mul64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) - } - case 4 { - // 100 = DIVW - switch rs2Value - case 0 { rdValue := u64Mask() } - default { - rdValue := mask32Signed64(sdiv64(mask32Signed64(rs1Value), mask32Signed64(rs2Value))) - } - } - case 5 { - // 101 = DIVUW - switch rs2Value - case 0 { rdValue := u64Mask() } - default { - rdValue := mask32Signed64(div64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) - } - } - case 6 { - // 110 = REMW - switch rs2Value - case 0 { rdValue := mask32Signed64(rs1Value) } - default { - rdValue := mask32Signed64(smod64(mask32Signed64(rs1Value), mask32Signed64(rs2Value))) - } - } - case 7 { - // 111 = REMUW - switch rs2Value - case 0 { rdValue := mask32Signed64(rs1Value) } - default { - rdValue := mask32Signed64(mod64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) - } - } - default { revertWithCode(0xbadc0de) } - } - default { - switch funct3 - case 0 { - // 000 = ADDW/SUBW - switch funct7 - case 0x00 { - // 0000000 = ADDW - rdValue := mask32Signed64(add64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) - } - case 0x20 { - // 0100000 = SUBW - rdValue := mask32Signed64(sub64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) - } - default { revertWithCode(0xbadc0de) } - } - case 1 { - // 001 = SLLW - rdValue := mask32Signed64(shl64(and64(rs2Value, toU64(0x1F)), rs1Value)) - } - case 5 { - // 101 = SR~ - let shamt := and64(rs2Value, toU64(0x1F)) - switch funct7 - case 0x00 { - // 0000000 = SRLW - rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), toU64(31)) - } - case 0x20 { - // 0100000 = SRAW - rdValue := signExtend64(shr64(shamt, and64(rs1Value, 
u32Mask())), sub64(toU64(31), shamt)) - } - default { revertWithCode(0xbadc0de) } - } - default { revertWithCode(0xbadc0de) } - } - setRegister(rd, rdValue) - setPC(add64(_pc, toU64(4))) - } - case 0x37 { - // 011_0111: LUI = Load upper immediate - let imm := parseImmTypeU(instr) - let rdValue := shl64(toU64(12), imm) - setRegister(rd, rdValue) - setPC(add64(_pc, toU64(4))) - } - case 0x17 { - // 001_0111: AUIPC = Add upper immediate to PC - let imm := parseImmTypeU(instr) - let rdValue := add64(_pc, signExtend64(shl64(toU64(12), imm), toU64(31))) - setRegister(rd, rdValue) - setPC(add64(_pc, toU64(4))) - } - case 0x6F { - // 110_1111: JAL = Jump and link - let imm := parseImmTypeJ(instr) - let rdValue := add64(_pc, toU64(4)) - setRegister(rd, rdValue) - - let newPC := add64(_pc, signExtend64(shl64(toU64(1), imm), toU64(20))) - if and64(newPC, toU64(3)) { - // quick target alignment check - revertWithCode(0xbad10ad0) // target not aligned with 4 bytes - } - setPC(newPC) // signed offset in multiples of 2 - // bytes (last bit is there, but ignored) - } - case 0x67 { - // 110_0111: JALR = Jump and link register - let rs1Value := getRegister(rs1) - let imm := parseImmTypeI(instr) - let rdValue := add64(_pc, toU64(4)) - setRegister(rd, rdValue) - - let newPC := and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1))) - if and64(newPC, toU64(3)) { - // quick target alignment check - revertWithCode(0xbad10ad0) // target not aligned with 4 bytes - } - setPC(newPC) // least significant bit is set to 0 - } - case 0x73 { - // 111_0011: environment things - switch funct3 - case 0 { - // 000 = ECALL/EBREAK - switch shr64(toU64(20), instr) - // I-type, top 12 bits - case 0 { - // imm12 = 000000000000 ECALL - sysCall(_localContext) - setPC(add64(_pc, toU64(4))) - } - default { - // imm12 = 000000000001 EBREAK - setPC(add64(_pc, toU64(4))) // ignore breakpoint - } - } - default { - // CSR instructions - setRegister(rd, toU64(0)) // ignore CSR instructions 
- setPC(add64(_pc, toU64(4))) - } - } - case 0x2F { - // 010_1111: RV{32,64}A and RV{32,64}A atomic operations extension - // acquire and release bits: - // aq := and64(shr64(toU64(1), funct7), toU64(1)) - // rl := and64(funct7, toU64(1)) - // if none set: unordered - // if aq is set: no following mem ops observed before acquire mem op - // if rl is set: release mem op not observed before earlier mem ops - // if both set: sequentially consistent - // These are no-op here because there is no pipeline of mem ops to acquire/release. - - // 0b010 == RV32A W variants - // 0b011 == RV64A D variants - let size := shl64(funct3, toU64(1)) - if or(lt64(size, toU64(4)), gt64(size, toU64(8))) { revertWithCode(0xbada70) } // bad AMO size - - let addr := getRegister(rs1) - if mod64(addr, size) { - // quick addr alignment check - revertWithCode(0xbad10ad0) // addr not aligned with 4 bytes - } - - let op := shr64(toU64(2), funct7) - switch op - case 0x2 { - // 00010 = LR = Load Reserved - let v := loadMem(addr, size, true, 1, 2) - setRegister(rd, v) - setLoadReservation(addr) - } - case 0x3 { - // 00011 = SC = Store Conditional - let rdValue := toU64(1) - if eq64(addr, getLoadReservation()) { - let rs2Value := getRegister(rs2) - storeMem(addr, size, rs2Value, 1, 2) - rdValue := toU64(0) - } - setRegister(rd, rdValue) - setLoadReservation(toU64(0)) - } - default { - // AMO: Atomic Memory Operation - let rs2Value := getRegister(rs2) - if eq64(size, toU64(4)) { rs2Value := mask32Signed64(rs2Value) } - let value := rs2Value - let v := loadMem(addr, size, true, 1, 2) - let rdValue := v - switch op - case 0x0 { - // 00000 = AMOADD = add - v := add64(v, value) - } - case 0x1 { - // 00001 = AMOSWAP - v := value - } - case 0x4 { - // 00100 = AMOXOR = xor - v := xor64(v, value) - } - case 0x8 { - // 01000 = AMOOR = or - v := or64(v, value) - } - case 0xc { - // 01100 = AMOAND = and - v := and64(v, value) - } - case 0x10 { - // 10000 = AMOMIN = min signed - if slt64(value, v) { v := value } 
- } - case 0x14 { - // 10100 = AMOMAX = max signed - if sgt64(value, v) { v := value } - } - case 0x18 { - // 11000 = AMOMINU = min unsigned - if lt64(value, v) { v := value } - } - case 0x1c { - // 11100 = AMOMAXU = max unsigned - if gt64(value, v) { v := value } - } - default { revertWithCode(0xf001a70) } // unknown atomic operation - - storeMem(addr, size, v, 1, 3) // after overwriting 1, proof 2 is no longer valid - setRegister(rd, rdValue) - } - setPC(add64(_pc, toU64(4))) - } - case 0x0F { - // 000_1111: fence - // Used to impose additional ordering constraints; flushing the mem operation pipeline. - // This VM doesn't have a pipeline, nor additional harts, so this is a no-op. - // FENCE / FENCE.TSO / FENCE.I all no-op: there's nothing to synchronize. - setPC(add64(_pc, toU64(4))) - } - case 0x07 { - // FLW/FLD: floating point load word/double - setPC(add64(_pc, toU64(4))) // no-op this. - } - case 0x27 { - // FSW/FSD: floating point store word/double - setPC(add64(_pc, toU64(4))) // no-op this. - } - case 0x53 { - // FADD etc. no-op is enough to pass Go runtime check - setPC(add64(_pc, toU64(4))) // no-op this. 
- } - default { revertWithCode(0xf001c0de) } // unknown instruction opcode - - mstore(0, computeStateHash()) - return(0, 0x20) - } - } -} diff --git a/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol b/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol index abec06bf78b66..294545b156164 100644 --- a/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol +++ b/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Testing -import { stdStorage, StdStorage } from "forge-std/Test.sol"; +import { stdStorage, StdStorage } from "forge-std/StdStorage.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; import { ForgeArtifacts, StorageSlot } from "scripts/libraries/ForgeArtifacts.sol"; diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 5dd8a9e9dc492..7feffe72daaaf 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -2,7 +2,8 @@ pragma solidity 0.8.15; // Testing -import { Test, stdStorage, StdStorage } from "forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; +import { stdStorage, StdStorage } from "forge-std/StdStorage.sol"; import { VmSafe } from "forge-std/Vm.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; import { FeatureFlags } from "test/setup/FeatureFlags.sol"; @@ -2241,6 +2242,11 @@ contract OPContractsManager_Migrate_Test is OPContractsManager_TestInit { contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase, DisputeGames { using stdStorage for StdStorage; + function setUp() public override { + super.setUp(); + skipIfDevFeatureEnabled(DevFeatures.OPCM_V2); + } + // This helper function is used to convert the input struct type defined in DeployOPChain.s.sol // to the input struct type defined in OPContractsManager.sol. 
function toOPCMDeployInput(Types.DeployOPChainInput memory _doi) @@ -2278,7 +2284,7 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase, DisputeGames input.l2ChainId = 0; vm.expectRevert(IOPContractsManager.InvalidChainId.selector); - opcm.deploy(input); + IOPContractsManager(opcmAddr).deploy(input); } function test_deploy_l2ChainIdEqualsCurrentChainId_reverts() public { @@ -2286,19 +2292,19 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase, DisputeGames input.l2ChainId = block.chainid; vm.expectRevert(IOPContractsManager.InvalidChainId.selector); - opcm.deploy(input); + IOPContractsManager(opcmAddr).deploy(input); } function test_deploy_succeeds() public { vm.expectEmit(true, true, true, false); // TODO precompute the expected `deployOutput`. emit Deployed(deployOPChainInput.l2ChainId, address(this), bytes("")); - opcm.deploy(toOPCMDeployInput(deployOPChainInput)); + IOPContractsManager(opcmAddr).deploy(toOPCMDeployInput(deployOPChainInput)); } /// @notice Test that deploy sets the permissioned dispute game implementation function test_deployPermissioned_succeeds() public { // Sanity-check setup is consistent with devFeatures flag - IOPContractsManager.Implementations memory impls = opcm.implementations(); + IOPContractsManager.Implementations memory impls = IOPContractsManager(opcmAddr).implementations(); address pdgImpl = address(impls.permissionedDisputeGameV2Impl); address fdgImpl = address(impls.faultDisputeGameV2Impl); assertFalse(pdgImpl == address(0), "PDG implementation address should be non-zero"); @@ -2306,7 +2312,7 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase, DisputeGames // Run OPCM.deploy IOPContractsManager.DeployInput memory opcmInput = toOPCMDeployInput(deployOPChainInput); - IOPContractsManager.DeployOutput memory opcmOutput = opcm.deploy(opcmInput); + IOPContractsManager.DeployOutput memory opcmOutput = IOPContractsManager(opcmAddr).deploy(opcmInput); // Verify that the 
DisputeGameFactory has registered an implementation for the PERMISSIONED_CANNON game type address actualPDGAddress = address(opcmOutput.disputeGameFactoryProxy.gameImpls(GameTypes.PERMISSIONED_CANNON)); diff --git a/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol index eceb10760a546..bab34b933d6ea 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol @@ -236,32 +236,34 @@ abstract contract OPContractsManagerStandardValidator_TestInit is CommonTest, Di address owner = proxyAdmin.owner(); // Prepare the upgrade input. - IOPContractsManagerV2.DisputeGameConfig[] memory disputeGameConfigs = - new IOPContractsManagerV2.DisputeGameConfig[](3); - disputeGameConfigs[0] = IOPContractsManagerV2.DisputeGameConfig({ + IOPContractsManagerUtils.DisputeGameConfig[] memory disputeGameConfigs = + new IOPContractsManagerUtils.DisputeGameConfig[](3); + disputeGameConfigs[0] = IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, initBond: disputeGameFactory.initBonds(GameTypes.CANNON), gameType: GameTypes.CANNON, - gameArgs: abi.encode(IOPContractsManagerV2.FaultDisputeGameConfig({ absolutePrestate: cannonPrestate })) + gameArgs: abi.encode( + IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: cannonPrestate }) + ) }); - disputeGameConfigs[1] = IOPContractsManagerV2.DisputeGameConfig({ + disputeGameConfigs[1] = IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, initBond: disputeGameFactory.initBonds(GameTypes.PERMISSIONED_CANNON), gameType: GameTypes.PERMISSIONED_CANNON, gameArgs: abi.encode( - IOPContractsManagerV2.PermissionedDisputeGameConfig({ + IOPContractsManagerUtils.PermissionedDisputeGameConfig({ absolutePrestate: cannonPrestate, proposer: proposer, challenger: challenger }) ) }); - disputeGameConfigs[2] = 
IOPContractsManagerV2.DisputeGameConfig({ + disputeGameConfigs[2] = IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, initBond: disputeGameFactory.initBonds(GameTypes.CANNON_KONA), gameType: GameTypes.CANNON_KONA, gameArgs: abi.encode( - IOPContractsManagerV2.FaultDisputeGameConfig({ absolutePrestate: cannonKonaPrestate }) + IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: cannonKonaPrestate }) ) }); diff --git a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol index 8e64d609e9efb..eca3d47705348 100644 --- a/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol +++ b/packages/contracts-bedrock/test/L1/ResourceMetering.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Testing -import { Test } from "forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; // Contracts import { ResourceMetering } from "src/L1/ResourceMetering.sol"; diff --git a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerContainer.t.sol b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerContainer.t.sol index 78eafbdf76304..fbb579449e9e7 100644 --- a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerContainer.t.sol +++ b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerContainer.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Testing -import { Test } from "forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; // Contracts import { OPContractsManagerContainer } from "src/L1/opcm/OPContractsManagerContainer.sol"; diff --git a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol index 5ca424905f413..ce79986542f91 100644 --- a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol +++ b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Testing -import { Test } from 
"forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; // Contracts import { OPContractsManagerUtils } from "src/L1/opcm/OPContractsManagerUtils.sol"; diff --git a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol index 917e82b3921cc..0b072c3a5b707 100644 --- a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol +++ b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol @@ -12,6 +12,7 @@ import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; import { Claim, Hash } from "src/dispute/lib/LibUDT.sol"; import { GameType, GameTypes, Proposal } from "src/dispute/lib/Types.sol"; import { DevFeatures } from "src/libraries/DevFeatures.sol"; +import { Features } from "src/libraries/Features.sol"; // Interfaces import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; @@ -22,6 +23,12 @@ import { ISemver } from "interfaces/universal/ISemver.sol"; import { IOPContractsManagerStandardValidator } from "interfaces/L1/IOPContractsManagerStandardValidator.sol"; import { IOPContractsManagerV2 } from "interfaces/L1/opcm/IOPContractsManagerV2.sol"; import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; +import { IOPContractsManagerMigrator } from "interfaces/L1/opcm/IOPContractsManagerMigrator.sol"; +import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; /// @title OPContractsManagerV2_TestInit /// @notice Base test initialization contract for OPContractsManagerV2. 
@@ -65,8 +72,9 @@ contract OPContractsManagerV2_TestInit is CommonTest, DisputeGames { address deployChallenger; for (uint256 i = 0; i < _deployConfig.disputeGameConfigs.length; i++) { if (_deployConfig.disputeGameConfigs[i].gameType.raw() == GameTypes.PERMISSIONED_CANNON.raw()) { - IOPContractsManagerV2.PermissionedDisputeGameConfig memory parsedArgs = abi.decode( - _deployConfig.disputeGameConfigs[i].gameArgs, (IOPContractsManagerV2.PermissionedDisputeGameConfig) + IOPContractsManagerUtils.PermissionedDisputeGameConfig memory parsedArgs = abi.decode( + _deployConfig.disputeGameConfigs[i].gameArgs, + (IOPContractsManagerUtils.PermissionedDisputeGameConfig) ); deployProposer = parsedArgs.proposer; deployChallenger = parsedArgs.challenger; @@ -240,20 +248,20 @@ contract OPContractsManagerV2_Upgrade_TestInit is OPContractsManagerV2_TestInit address initialProposerForV2 = permissionedGameProposer(disputeGameFactory); v2UpgradeInput.systemConfig = systemConfig; v2UpgradeInput.disputeGameConfigs.push( - IOPContractsManagerV2.DisputeGameConfig({ + IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, initBond: disputeGameFactory.initBonds(GameTypes.CANNON), gameType: GameTypes.CANNON, - gameArgs: abi.encode(IOPContractsManagerV2.FaultDisputeGameConfig({ absolutePrestate: cannonPrestate })) + gameArgs: abi.encode(IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: cannonPrestate })) }) ); v2UpgradeInput.disputeGameConfigs.push( - IOPContractsManagerV2.DisputeGameConfig({ + IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, initBond: disputeGameFactory.initBonds(GameTypes.PERMISSIONED_CANNON), gameType: GameTypes.PERMISSIONED_CANNON, gameArgs: abi.encode( - IOPContractsManagerV2.PermissionedDisputeGameConfig({ + IOPContractsManagerUtils.PermissionedDisputeGameConfig({ absolutePrestate: cannonPrestate, proposer: initialProposerForV2, challenger: initialChallengerForV2 @@ -262,11 +270,13 @@ contract OPContractsManagerV2_Upgrade_TestInit is 
OPContractsManagerV2_TestInit }) ); v2UpgradeInput.disputeGameConfigs.push( - IOPContractsManagerV2.DisputeGameConfig({ + IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, initBond: disputeGameFactory.initBonds(GameTypes.CANNON_KONA), gameType: GameTypes.CANNON_KONA, - gameArgs: abi.encode(IOPContractsManagerV2.FaultDisputeGameConfig({ absolutePrestate: cannonKonaPrestate })) + gameArgs: abi.encode( + IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: cannonKonaPrestate }) + ) }) ); @@ -532,7 +542,7 @@ contract OPContractsManagerV2_Upgrade_Test is OPContractsManagerV2_Upgrade_TestI /// in the wrong order. function test_upgrade_wrongGameConfigOrder_reverts() public { // Swap the game config order. - IOPContractsManagerV2.DisputeGameConfig memory temp = v2UpgradeInput.disputeGameConfigs[0]; + IOPContractsManagerUtils.DisputeGameConfig memory temp = v2UpgradeInput.disputeGameConfigs[0]; v2UpgradeInput.disputeGameConfigs[0] = v2UpgradeInput.disputeGameConfigs[1]; v2UpgradeInput.disputeGameConfigs[1] = temp; @@ -731,9 +741,9 @@ contract OPContractsManagerV2_Upgrade_Test is OPContractsManagerV2_Upgrade_TestI // Update the dispute game configs to point at the new prestates. 
v2UpgradeInput.disputeGameConfigs[0].gameArgs = - abi.encode(IOPContractsManagerV2.FaultDisputeGameConfig({ absolutePrestate: newPrestate })); + abi.encode(IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: newPrestate })); v2UpgradeInput.disputeGameConfigs[1].gameArgs = abi.encode( - IOPContractsManagerV2.PermissionedDisputeGameConfig({ + IOPContractsManagerUtils.PermissionedDisputeGameConfig({ absolutePrestate: newPrestate, proposer: permissionedGameProposer(disputeGameFactory), challenger: permissionedGameChallenger(disputeGameFactory) @@ -1020,20 +1030,20 @@ contract OPContractsManagerV2_Deploy_Test is OPContractsManagerV2_TestInit { address initialChallenger = permissionedGameChallenger(disputeGameFactory); address initialProposer = permissionedGameProposer(disputeGameFactory); deployConfig.disputeGameConfigs.push( - IOPContractsManagerV2.DisputeGameConfig({ + IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, - initBond: 0.08 ether, // Standard init bond + initBond: DEFAULT_DISPUTE_GAME_INIT_BOND, // Standard init bond gameType: GameTypes.CANNON, - gameArgs: abi.encode(IOPContractsManagerV2.FaultDisputeGameConfig({ absolutePrestate: cannonPrestate })) + gameArgs: abi.encode(IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: cannonPrestate })) }) ); deployConfig.disputeGameConfigs.push( - IOPContractsManagerV2.DisputeGameConfig({ + IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, - initBond: 0.08 ether, // Standard init bond + initBond: DEFAULT_DISPUTE_GAME_INIT_BOND, // Standard init bond gameType: GameTypes.PERMISSIONED_CANNON, gameArgs: abi.encode( - IOPContractsManagerV2.PermissionedDisputeGameConfig({ + IOPContractsManagerUtils.PermissionedDisputeGameConfig({ absolutePrestate: cannonPrestate, proposer: initialProposer, challenger: initialChallenger @@ -1042,11 +1052,13 @@ contract OPContractsManagerV2_Deploy_Test is OPContractsManagerV2_TestInit { }) ); deployConfig.disputeGameConfigs.push( - 
IOPContractsManagerV2.DisputeGameConfig({ + IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, - initBond: 0.08 ether, // Standard init bond + initBond: DEFAULT_DISPUTE_GAME_INIT_BOND, // Standard init bond gameType: GameTypes.CANNON_KONA, - gameArgs: abi.encode(IOPContractsManagerV2.FaultDisputeGameConfig({ absolutePrestate: cannonKonaPrestate })) + gameArgs: abi.encode( + IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: cannonKonaPrestate }) + ) }) ); } @@ -1095,7 +1107,7 @@ contract OPContractsManagerV2_Deploy_Test is OPContractsManagerV2_TestInit { /// @notice Tests that deploy reverts when game configs are in wrong order. function test_deploy_wrongGameConfigOrder_reverts() public { // Swap the game config order. - IOPContractsManagerV2.DisputeGameConfig memory temp = deployConfig.disputeGameConfigs[0]; + IOPContractsManagerUtils.DisputeGameConfig memory temp = deployConfig.disputeGameConfigs[0]; deployConfig.disputeGameConfigs[0] = deployConfig.disputeGameConfigs[1]; deployConfig.disputeGameConfigs[1] = temp; @@ -1141,3 +1153,354 @@ contract OPContractsManagerV2_DevFeatureBitmap_Test is OPContractsManagerV2_Test ); } } + +/// @title OPContractsManagerV2_Migrate_Test +/// @notice Tests the `migrate` function of the `OPContractsManagerV2` contract. +contract OPContractsManagerV2_Migrate_Test is OPContractsManagerV2_TestInit { + /// @notice Deployed chain contracts for chain 1. + IOPContractsManagerV2.ChainContracts chainContracts1; + + /// @notice Deployed chain contracts for chain 2. + IOPContractsManagerV2.ChainContracts chainContracts2; + + /// @notice Super root prestate for super cannon games. + Claim superPrestate = Claim.wrap(bytes32(keccak256("superPrestate"))); + + /// @notice Function requires interop portal. + function setUp() public override { + super.setUp(); + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + + // Deploy two chains via OPCMv2 for migration testing. 
+ chainContracts1 = _deployChainForMigration(1000001); + chainContracts2 = _deployChainForMigration(1000002); + } + + /// @notice Helper function to deploy a chain for migration testing. + /// @param _l2ChainId The L2 chain ID for the deployed chain. + /// @return cts_ The deployed chain contracts. + function _deployChainForMigration(uint256 _l2ChainId) + internal + returns (IOPContractsManagerV2.ChainContracts memory cts_) + { + // Set up dispute game configs first since they're needed for the struct literal. + address initialChallenger = permissionedGameChallenger(disputeGameFactory); + address initialProposer = permissionedGameProposer(disputeGameFactory); + IOPContractsManagerUtils.DisputeGameConfig[] memory dgConfigs = + new IOPContractsManagerUtils.DisputeGameConfig[](3); + dgConfigs[0] = IOPContractsManagerUtils.DisputeGameConfig({ + enabled: true, + initBond: 0.08 ether, + gameType: GameTypes.CANNON, + gameArgs: abi.encode(IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: cannonPrestate })) + }); + dgConfigs[1] = IOPContractsManagerUtils.DisputeGameConfig({ + enabled: true, + initBond: 0.08 ether, + gameType: GameTypes.PERMISSIONED_CANNON, + gameArgs: abi.encode( + IOPContractsManagerUtils.PermissionedDisputeGameConfig({ + absolutePrestate: cannonPrestate, + proposer: initialProposer, + challenger: initialChallenger + }) + ) + }); + dgConfigs[2] = IOPContractsManagerUtils.DisputeGameConfig({ + enabled: true, + initBond: 0.08 ether, + gameType: GameTypes.CANNON_KONA, + gameArgs: abi.encode(IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: cannonKonaPrestate })) + }); + + // Set up the deploy config using struct literal for compile-time field checking. 
+ IOPContractsManagerV2.FullConfig memory deployConfig = IOPContractsManagerV2.FullConfig({ + saltMixer: string(abi.encodePacked("migrate-test-", _l2ChainId)), + superchainConfig: superchainConfig, + proxyAdminOwner: makeAddr("migrateProxyAdminOwner"), + systemConfigOwner: makeAddr("migrateSystemConfigOwner"), + unsafeBlockSigner: makeAddr("migrateUnsafeBlockSigner"), + batcher: makeAddr("migrateBatcher"), + startingAnchorRoot: Proposal({ root: Hash.wrap(bytes32(hex"1234")), l2SequenceNumber: 123 }), + startingRespectedGameType: GameTypes.PERMISSIONED_CANNON, + basefeeScalar: 1368, + blobBasefeeScalar: 801949, + gasLimit: 60_000_000, + l2ChainId: _l2ChainId, + resourceConfig: IResourceMetering.ResourceConfig({ + maxResourceLimit: 20_000_000, + elasticityMultiplier: 10, + baseFeeMaxChangeDenominator: 8, + minimumBaseFee: 1 gwei, + systemTxMaxGas: 1_000_000, + maximumBaseFee: type(uint128).max + }), + disputeGameConfigs: dgConfigs, + useCustomGasToken: false + }); + + // Deploy the chain. + cts_ = opcmV2.deploy(deployConfig); + } + + /// @notice Helper function to create the default migration input. + /// @return input_ The default migration input. + function _getDefaultMigrateInput() internal returns (IOPContractsManagerMigrator.MigrateInput memory input_) { + // Set up the chain system configs. + ISystemConfig[] memory chainSystemConfigs = new ISystemConfig[](2); + chainSystemConfigs[0] = chainContracts1.systemConfig; + chainSystemConfigs[1] = chainContracts2.systemConfig; + + // Set up the dispute game configs for super root games. 
+ address proposer = makeAddr("superProposer"); + address challenger = makeAddr("superChallenger"); + + IOPContractsManagerUtils.DisputeGameConfig[] memory disputeGameConfigs = + new IOPContractsManagerUtils.DisputeGameConfig[](1); + disputeGameConfigs[0] = IOPContractsManagerUtils.DisputeGameConfig({ + enabled: true, + initBond: 0.08 ether, + gameType: GameTypes.SUPER_PERMISSIONED_CANNON, + gameArgs: abi.encode( + IOPContractsManagerUtils.PermissionedDisputeGameConfig({ + absolutePrestate: superPrestate, + proposer: proposer, + challenger: challenger + }) + ) + }); + + input_ = IOPContractsManagerMigrator.MigrateInput({ + chainSystemConfigs: chainSystemConfigs, + disputeGameConfigs: disputeGameConfigs, + startingAnchorRoot: Proposal({ root: Hash.wrap(bytes32(hex"ABBA")), l2SequenceNumber: 1234 }), + startingRespectedGameType: GameTypes.SUPER_PERMISSIONED_CANNON + }); + } + + /// @notice Helper function to execute a migration. + /// @param _input The input to the migration function. + function _doMigration(IOPContractsManagerMigrator.MigrateInput memory _input) internal { + _doMigration(_input, bytes4(0)); + } + + /// @notice Helper function to execute a migration with a revert selector. + /// @param _input The input to the migration function. + /// @param _revertSelector The selector of the revert to expect. + function _doMigration(IOPContractsManagerMigrator.MigrateInput memory _input, bytes4 _revertSelector) internal { + // Set the proxy admin owner to be a delegate caller. + address proxyAdminOwner = chainContracts1.proxyAdmin.owner(); + + // Execute a delegatecall to the OPCM migration function. + // Check gas usage of the migration function. 
+ uint256 gasBefore = gasleft(); + if (_revertSelector != bytes4(0)) { + vm.expectRevert(_revertSelector); + } + prankDelegateCall(proxyAdminOwner); + (bool success,) = address(opcmV2).delegatecall(abi.encodeCall(IOPContractsManagerV2.migrate, (_input))); + assertTrue(success, "migrate failed"); + uint256 gasAfter = gasleft(); + + // Make sure the gas usage is less than 20 million so we can definitely fit in a block. + assertLt(gasBefore - gasAfter, 20_000_000, "Gas usage too high"); + } + + /// @notice Helper function to assert that the old game implementations are now zeroed out. + /// @param _disputeGameFactory The dispute game factory to check. + function _assertOldGamesZeroed(IDisputeGameFactory _disputeGameFactory) internal view { + // Assert that the old game implementations are now zeroed out. + _assertGameIsEmpty(_disputeGameFactory, GameTypes.CANNON, "CANNON"); + _assertGameIsEmpty(_disputeGameFactory, GameTypes.SUPER_CANNON, "SUPER_CANNON"); + _assertGameIsEmpty(_disputeGameFactory, GameTypes.PERMISSIONED_CANNON, "PERMISSIONED_CANNON"); + _assertGameIsEmpty(_disputeGameFactory, GameTypes.SUPER_PERMISSIONED_CANNON, "SUPER_PERMISSIONED_CANNON"); + _assertGameIsEmpty(_disputeGameFactory, GameTypes.CANNON_KONA, "CANNON_KONA"); + _assertGameIsEmpty(_disputeGameFactory, GameTypes.SUPER_CANNON_KONA, "SUPER_CANNON_KONA"); + } + + /// @notice Helper function to assert a game is empty. + /// @param _dgf The dispute game factory. + /// @param _gameType The game type. + /// @param _label The label for the game type. + function _assertGameIsEmpty(IDisputeGameFactory _dgf, GameType _gameType, string memory _label) internal view { + assertEq( + address(_dgf.gameImpls(_gameType)), + address(0), + string.concat("Game type set when it should not be: ", _label) + ); + assertEq(_dgf.gameArgs(_gameType), hex"", string.concat("Game args should be empty: ", _label)); + } + + /// @notice Tests that the migration function succeeds and liquidity is migrated. 
+ function test_migrate_succeeds() public { + IOPContractsManagerMigrator.MigrateInput memory input = _getDefaultMigrateInput(); + + // Pre-migration setup: Get old lockboxes and fund them. + IETHLockbox oldLockbox1; + IETHLockbox oldLockbox2; + uint256 lockbox1Balance = 10 ether; + uint256 lockbox2Balance = 5 ether; + { + IOptimismPortal2 oldPortal1 = IOptimismPortal2(payable(chainContracts1.systemConfig.optimismPortal())); + IOptimismPortal2 oldPortal2 = IOptimismPortal2(payable(chainContracts2.systemConfig.optimismPortal())); + oldLockbox1 = oldPortal1.ethLockbox(); + oldLockbox2 = oldPortal2.ethLockbox(); + vm.deal(address(oldLockbox1), lockbox1Balance); + vm.deal(address(oldLockbox2), lockbox2Balance); + } + + // Pre-migration: Get old DisputeGameFactories. + IDisputeGameFactory oldDGF1 = IDisputeGameFactory(payable(chainContracts1.systemConfig.disputeGameFactory())); + IDisputeGameFactory oldDGF2 = IDisputeGameFactory(payable(chainContracts2.systemConfig.disputeGameFactory())); + + // Execute the migration. + _doMigration(input); + + // Assert that the old game implementations are now zeroed out. + _assertOldGamesZeroed(oldDGF1); + _assertOldGamesZeroed(oldDGF2); + + // Grab the two OptimismPortal addresses. + IOptimismPortal2 portal1 = IOptimismPortal2(payable(chainContracts1.systemConfig.optimismPortal())); + IOptimismPortal2 portal2 = IOptimismPortal2(payable(chainContracts2.systemConfig.optimismPortal())); + + // Grab the AnchorStateRegistry from the OptimismPortal for both chains, confirm same. + assertEq( + address(portal1.anchorStateRegistry()), + address(portal2.anchorStateRegistry()), + "AnchorStateRegistry mismatch" + ); + + // Extract the AnchorStateRegistry now that we know it's the same on both chains. + IAnchorStateRegistry asr = portal1.anchorStateRegistry(); + + // Check that the starting anchor root is the same as the input. 
+ { + (Hash root, uint256 l2SeqNum) = asr.getAnchorRoot(); + assertEq(root.raw(), input.startingAnchorRoot.root.raw(), "Starting anchor root mismatch"); + assertEq(l2SeqNum, input.startingAnchorRoot.l2SequenceNumber, "Starting anchor root L2 seq num mismatch"); + } + + // Grab the ETHLockbox from the OptimismPortal for both chains, confirm same. + assertEq(address(portal1.ethLockbox()), address(portal2.ethLockbox()), "ETHLockbox mismatch"); + + // Extract the new ETHLockbox now that we know it's the same on both chains. + IETHLockbox newLockbox = portal1.ethLockbox(); + + // Check that the ETHLockbox has authorized portals. + assertTrue(newLockbox.authorizedPortals(portal1), "ETHLockbox does not have portal 1 authorized"); + assertTrue(newLockbox.authorizedPortals(portal2), "ETHLockbox does not have portal 2 authorized"); + + // Check that superRootsActive is true on both portals. + assertTrue( + IOptimismPortalInterop(payable(address(portal1))).superRootsActive(), + "Portal 1 superRootsActive should be true" + ); + assertTrue( + IOptimismPortalInterop(payable(address(portal2))).superRootsActive(), + "Portal 2 superRootsActive should be true" + ); + + // Check that the ETH_LOCKBOX feature is enabled on both SystemConfigs. + assertTrue( + chainContracts1.systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX), + "Chain 1 ETH_LOCKBOX feature should be enabled" + ); + assertTrue( + chainContracts2.systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX), + "Chain 2 ETH_LOCKBOX feature should be enabled" + ); + + // Check that the init bonds are set correctly on the new DisputeGameFactory. + assertEq( + IDisputeGameFactory(asr.disputeGameFactory()).initBonds(GameTypes.SUPER_PERMISSIONED_CANNON), + 0.08 ether, + "SUPER_PERMISSIONED_CANNON init bond mismatch" + ); + + // Check that liquidity was migrated from old lockboxes to the new shared lockbox. 
+ assertEq(address(oldLockbox1).balance, 0, "Old lockbox 1 should have 0 balance after migration"); + assertEq(address(oldLockbox2).balance, 0, "Old lockbox 2 should have 0 balance after migration"); + assertEq( + address(newLockbox).balance, + lockbox1Balance + lockbox2Balance, + "New lockbox should have combined balance from both old lockboxes" + ); + + // Check that the old lockboxes are authorized on the new lockbox. + assertTrue(newLockbox.authorizedLockboxes(oldLockbox1), "Old lockbox 1 should be authorized on new lockbox"); + assertTrue(newLockbox.authorizedLockboxes(oldLockbox2), "Old lockbox 2 should be authorized on new lockbox"); + } + + /// @notice Tests that the migration function reverts when the ProxyAdmin owners are mismatched. + /// @param _owner1 The owner address for the first chain's ProxyAdmin. + /// @param _owner2 The owner address for the second chain's ProxyAdmin. + function testFuzz_migrate_mismatchedProxyAdminOwners_reverts(address _owner1, address _owner2) public { + vm.assume(_owner1 != _owner2); + assumeNotPrecompile(_owner1); + assumeNotPrecompile(_owner2); + assumeNotForgeAddress(_owner1); + assumeNotForgeAddress(_owner2); + IOPContractsManagerMigrator.MigrateInput memory input = _getDefaultMigrateInput(); + + // Mock out the owners of the ProxyAdmins to be different. + vm.mockCall( + address(input.chainSystemConfigs[0].proxyAdmin()), + abi.encodeCall(IProxyAdmin.owner, ()), + abi.encode(_owner1) + ); + vm.mockCall( + address(input.chainSystemConfigs[1].proxyAdmin()), + abi.encodeCall(IProxyAdmin.owner, ()), + abi.encode(_owner2) + ); + + // Execute the migration, expect revert. + _doMigration(input, IOPContractsManagerMigrator.OPContractsManagerMigrator_ProxyAdminOwnerMismatch.selector); + } + + /// @notice Tests that the migration function reverts when the SuperchainConfig addresses are mismatched. + /// @param _config1 The SuperchainConfig address for the first chain. 
+ /// @param _config2 The SuperchainConfig address for the second chain. + function testFuzz_migrate_mismatchedSuperchainConfig_reverts(address _config1, address _config2) public { + vm.assume(_config1 != _config2); + assumeNotPrecompile(_config1); + assumeNotPrecompile(_config2); + assumeNotForgeAddress(_config1); + assumeNotForgeAddress(_config2); + IOPContractsManagerMigrator.MigrateInput memory input = _getDefaultMigrateInput(); + + // Mock out the SuperchainConfig addresses to be different. + vm.mockCall( + address(input.chainSystemConfigs[0]), + abi.encodeCall(ISystemConfig.superchainConfig, ()), + abi.encode(_config1) + ); + vm.mockCall( + address(input.chainSystemConfigs[1]), + abi.encodeCall(ISystemConfig.superchainConfig, ()), + abi.encode(_config2) + ); + + // Execute the migration, expect revert. + _doMigration(input, IOPContractsManagerMigrator.OPContractsManagerMigrator_SuperchainConfigMismatch.selector); + } + + /// @notice Tests that the migration function reverts when the starting respected game type is invalid. + /// @param _gameTypeRaw The raw game type value to test. + function testFuzz_migrate_invalidStartingRespectedGameType_reverts(uint32 _gameTypeRaw) public { + // Only SUPER_CANNON (4) and SUPER_PERMISSIONED_CANNON (5) are valid for migration. + vm.assume(_gameTypeRaw != GameTypes.SUPER_CANNON.raw()); + vm.assume(_gameTypeRaw != GameTypes.SUPER_PERMISSIONED_CANNON.raw()); + + IOPContractsManagerMigrator.MigrateInput memory input = _getDefaultMigrateInput(); + + // Set an invalid starting respected game type. + input.startingRespectedGameType = GameType.wrap(_gameTypeRaw); + + // Execute the migration, expect revert. 
+ _doMigration( + input, IOPContractsManagerMigrator.OPContractsManagerMigrator_InvalidStartingRespectedGameType.selector + ); + } +} diff --git a/packages/contracts-bedrock/test/L2/CrossDomainOwnable.t.sol b/packages/contracts-bedrock/test/L2/CrossDomainOwnable.t.sol index 56c3f74dda1eb..99898df6f4d3c 100644 --- a/packages/contracts-bedrock/test/L2/CrossDomainOwnable.t.sol +++ b/packages/contracts-bedrock/test/L2/CrossDomainOwnable.t.sol @@ -1,9 +1,9 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities +// Testing +import { Test } from "test/setup/Test.sol"; import { VmSafe } from "forge-std/Vm.sol"; -import { Test } from "forge-std/Test.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries diff --git a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol index 323ac2c5b06ca..7d1b2460099bb 100644 --- a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol +++ b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; import { VmSafe } from "forge-std/Vm.sol"; diff --git a/packages/contracts-bedrock/test/L2/FeeSplitterVaults.t.sol b/packages/contracts-bedrock/test/L2/FeeSplitterVaults.t.sol index fbb475373cdf5..c316e23af8a2c 100644 --- a/packages/contracts-bedrock/test/L2/FeeSplitterVaults.t.sol +++ b/packages/contracts-bedrock/test/L2/FeeSplitterVaults.t.sol @@ -1,13 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -// Libraries -import { Predeploys } from "src/libraries/Predeploys.sol"; - // Testing -import { Test } from "forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; import { FeeSplitterForTest } from "test/mocks/FeeSplitterForTest.sol"; +// Libraries +import { Predeploys } from 
"src/libraries/Predeploys.sol"; + // Interfaces import { IFeeSplitter } from "interfaces/L2/IFeeSplitter.sol"; diff --git a/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol b/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol index e0f959333c445..72b74f0c36b8f 100644 --- a/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol +++ b/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol @@ -1,13 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; import { Fork } from "scripts/libraries/Config.sol"; +import { stdError } from "forge-std/StdError.sol"; // Libraries import { Encoding } from "src/libraries/Encoding.sol"; -import { stdError } from "forge-std/Test.sol"; contract GasPriceOracle_Test is CommonTest { address depositor; diff --git a/packages/contracts-bedrock/test/L2/L1Block.t.sol b/packages/contracts-bedrock/test/L2/L1Block.t.sol index 5d36bb8863124..8b31d1e2ac39a 100644 --- a/packages/contracts-bedrock/test/L2/L1Block.t.sol +++ b/packages/contracts-bedrock/test/L2/L1Block.t.sol @@ -3,7 +3,7 @@ pragma solidity 0.8.15; // Testing import { CommonTest } from "test/setup/CommonTest.sol"; -import { stdStorage, StdStorage } from "forge-std/Test.sol"; +import { stdStorage, StdStorage } from "forge-std/StdStorage.sol"; // Libraries import { Encoding } from "src/libraries/Encoding.sol"; diff --git a/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol b/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol index a0f1c0aec8c0e..8ea1e57ad20f0 100644 --- a/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol +++ b/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Testing -import { stdStorage, StdStorage } from "forge-std/Test.sol"; +import { stdStorage, StdStorage } from "forge-std/StdStorage.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; import { CommonTest } from 
"test/setup/CommonTest.sol"; diff --git a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol index 9b2d86423605e..508aac4659066 100644 --- a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import { Vm } from "forge-std/Vm.sol"; // Libraries diff --git a/packages/contracts-bedrock/test/L2/LiquidityController.t.sol b/packages/contracts-bedrock/test/L2/LiquidityController.t.sol index 82208200fccff..9b77c45ea756c 100644 --- a/packages/contracts-bedrock/test/L2/LiquidityController.t.sol +++ b/packages/contracts-bedrock/test/L2/LiquidityController.t.sol @@ -1,9 +1,9 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; -import { stdStorage, StdStorage } from "forge-std/Test.sol"; +import { stdStorage, StdStorage } from "forge-std/StdStorage.sol"; // Libraries import { Features } from "src/libraries/Features.sol"; diff --git a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol index 3479ec0330dea..3950de5901acb 100644 --- a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol +++ b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Libraries diff --git a/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol 
b/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol index b409c2c42bd1b..7835c11542bd8 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; diff --git a/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol b/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol index 059d66cc9c119..daf0f1546eeb3 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; diff --git a/packages/contracts-bedrock/test/cannon/MIPS64.t.sol b/packages/contracts-bedrock/test/cannon/MIPS64.t.sol index 6e3caaa16955f..417e476fa53bf 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS64.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS64.t.sol @@ -1,10 +1,16 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +// Scripts import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +// Libraries import { UnsupportedStateVersion } from "src/cannon/libraries/CannonErrors.sol"; + +// Interfaces import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; import { IMIPS64 } from "interfaces/cannon/IMIPS64.sol"; @@ -14,7 +20,7 @@ abstract contract MIPS64_TestInit is Test { IPreimageOracle oracle; // Store some data about acceptable versions - uint256[2] 
validVersions = [7, 8]; + uint256[1] validVersions = [uint256(8)]; mapping(uint256 => bool) public isValidVersion; uint256 maxValidVersion; diff --git a/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol b/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol index 59cef2f3653fe..ce423dae10d6d 100644 --- a/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol +++ b/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol @@ -2,7 +2,9 @@ pragma solidity 0.8.15; // Testing -import { Test, Vm, console2 as console } from "forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; +import { Vm } from "forge-std/Vm.sol"; +import { console2 as console } from "forge-std/console2.sol"; // Scripts import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; diff --git a/packages/contracts-bedrock/test/dispute/AnchorStateRegistry.t.sol b/packages/contracts-bedrock/test/dispute/AnchorStateRegistry.t.sol index 2334ce904da4d..a33f69642154c 100644 --- a/packages/contracts-bedrock/test/dispute/AnchorStateRegistry.t.sol +++ b/packages/contracts-bedrock/test/dispute/AnchorStateRegistry.t.sol @@ -467,7 +467,7 @@ contract AnchorStateRegistry_GetStartingAnchorRoot_Test is AnchorStateRegistry_T // Mock the game's anchor root to be different from the starting anchor root. 
vm.mockCall( address(gameProxy), - abi.encodeCall(gameProxy.rootClaim, ()), + abi.encodeCall(IDisputeGame.rootClaim, ()), abi.encode(Claim.wrap(keccak256(abi.encode(123)))) ); diff --git a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol index e17fde3c0a673..74e93e505fe86 100644 --- a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol +++ b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol @@ -25,6 +25,12 @@ import { IPermissionedDisputeGameV2 } from "interfaces/dispute/v2/IPermissionedD import { ISuperPermissionedDisputeGame } from "interfaces/dispute/ISuperPermissionedDisputeGame.sol"; // Mocks import { AlphabetVM } from "test/mocks/AlphabetVM.sol"; +import { SP1MockVerifier } from "test/dispute/zk/mocks/SP1MockVerifier.sol"; + +// OptimisticZk +import { OptimisticZkGame } from "src/dispute/zk/OptimisticZkGame.sol"; +import { AccessManager } from "src/dispute/zk/AccessManager.sol"; +import { ISP1Verifier } from "src/dispute/zk/ISP1Verifier.sol"; /// @notice A fake clone used for testing the `DisputeGameFactory` contract's `create` function. 
contract DisputeGameFactory_FakeClone_Harness { @@ -144,7 +150,7 @@ abstract contract DisputeGameFactory_TestInit is CommonTest { } else { disputeGameFactory.setImplementation(_gameType, IDisputeGame(_gameImpl)); } - disputeGameFactory.setInitBond(_gameType, 0.08 ether); + disputeGameFactory.setInitBond(_gameType, DEFAULT_DISPUTE_GAME_INIT_BOND); vm.stopPrank(); } @@ -388,6 +394,59 @@ abstract contract DisputeGameFactory_TestInit is CommonTest { }); _setGame(gameImpl_, GameTypes.SUPER_PERMISSIONED_CANNON, _implArgs); } + + /// @notice Parameters for OptimisticZk game setup + struct OptimisticZkGameParams { + Duration maxChallengeDuration; + Duration maxProveDuration; + address proposer; + address challenger; + bytes32 rollupConfigHash; + bytes32 aggregationVkey; + bytes32 rangeVkeyCommitment; + uint256 challengerBond; + } + + /// @notice Sets up an OptimisticZk game implementation + function setupOptimisticZkGame(OptimisticZkGameParams memory _params) + internal + returns (address gameImpl_, AccessManager accessManager_, ISP1Verifier sp1Verifier_) + { + // Deploy mock verifier + sp1Verifier_ = ISP1Verifier(address(new SP1MockVerifier())); + + // Deploy access manager + accessManager_ = new AccessManager(2 weeks, disputeGameFactory); + accessManager_.setProposer(_params.proposer, true); + accessManager_.setChallenger(_params.challenger, true); + + // Deploy game implementation + gameImpl_ = address( + new OptimisticZkGame( + _params.maxChallengeDuration, + _params.maxProveDuration, + disputeGameFactory, + sp1Verifier_, + _params.rollupConfigHash, + _params.aggregationVkey, + _params.rangeVkeyCommitment, + _params.challengerBond, + anchorStateRegistry, + accessManager_ + ) + ); + + // Set respected game type for OptimisticZk + GameType gameType = GameTypes.OPTIMISTIC_ZK_GAME_TYPE; + vm.prank(superchainConfig.guardian()); + anchorStateRegistry.setRespectedGameType(gameType); + + // Register with factory + vm.startPrank(disputeGameFactory.owner()); + 
disputeGameFactory.setImplementation(gameType, IDisputeGame(gameImpl_)); + disputeGameFactory.setInitBond(gameType, _params.challengerBond); + vm.stopPrank(); + } } /// @title DisputeGameFactory_Initialize_Test diff --git a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol index 3ad1f2a0b9e28..87ca2a403f02e 100644 --- a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol @@ -2078,6 +2078,17 @@ contract FaultDisputeGame_RootClaim_Test is FaultDisputeGame_TestInit { function test_rootClaim_succeeds() public view { assertEq(gameProxy.rootClaim().raw(), ROOT_CLAIM.raw()); } + + /// @notice Tests that rootClaimByChainId returns the same value as rootClaim(). + function test_rootClaimByChainId_succeeds() public view { + assertEq(gameProxy.rootClaimByChainId(gameProxy.l2ChainId()).raw(), gameProxy.rootClaim().raw()); + } + + /// @notice Tests that rootClaimByChainId reverts with unknown chain ID. + function test_rootClaimByChainId_unknownChainId_reverts() public { + vm.expectRevert(UnknownChainId.selector); + gameProxy.rootClaimByChainId(0); + } } /// @title FaultDisputeGame_ExtraData_Test @@ -2112,7 +2123,7 @@ contract FaultDisputeGame_GetRequiredBond_Test is FaultDisputeGame_TestInit { uint256 bond = gameProxy.getRequiredBond(pos); // Reasonable approximation for a max depth of 8. 
- uint256 expected = 0.08 ether; + uint256 expected = DEFAULT_DISPUTE_GAME_INIT_BOND; for (uint64 j = 0; j < i; j++) { expected = expected * 22876; expected = expected / 10000; diff --git a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol index e5f15c3424c74..4c344d6d1a96b 100644 --- a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol @@ -415,3 +415,19 @@ contract PermissionedDisputeGame_Uncategorized_Test is PermissionedDisputeGame_T vm.stopPrank(); } } + +/// @title PermissionedDisputeGame_RootClaimByChainId_Test +/// @notice Tests the `rootClaimByChainId` function. +contract PermissionedDisputeGame_RootClaimByChainId_Test is PermissionedDisputeGame_TestInit { + /// @notice Tests that rootClaimByChainId returns the same value as rootClaim(). + function test_rootClaimByChainId_succeeds() public view { + assertEq(gameProxy.rootClaimByChainId(gameProxy.l2ChainId()).raw(), gameProxy.rootClaim().raw()); + } + + /// @notice Tests that rootClaimByChainId reverts with unknown chain ID. + function test_rootClaimByChainId_unknownChainId_reverts(uint256 _chainId) public { + vm.assume(_chainId != gameProxy.l2ChainId()); + vm.expectRevert(UnknownChainId.selector); + gameProxy.rootClaimByChainId(_chainId); + } +} diff --git a/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol index c3a78ce42f841..a25ae92bf9b19 100644 --- a/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol @@ -1950,7 +1950,7 @@ contract SuperFaultDisputeGame_GetRequiredBond_Test is SuperFaultDisputeGame_Tes uint256 bond = gameProxy.getRequiredBond(pos); // Reasonable approximation for a max depth of 8. 
- uint256 expected = 0.08 ether; + uint256 expected = DEFAULT_DISPUTE_GAME_INIT_BOND; for (uint64 j = 0; j < i; j++) { expected = expected * 22876; expected = expected / 10000; diff --git a/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol index c3d0dd5412bc2..f3b216e82458d 100644 --- a/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol @@ -372,3 +372,22 @@ contract SuperPermissionedDisputeGame_Initialize_Test is SuperPermissionedDisput ); } } + +/// @title SuperPermissionedDisputeGame_RootClaimByChainId_Test +/// @notice Tests the `rootClaimByChainId` function. +contract SuperPermissionedDisputeGame_RootClaimByChainId_Test is SuperPermissionedDisputeGame_TestInit { + /// @notice Tests that the game's root claim for each output root is set correctly. + function test_rootClaimForOutputRoot_succeeds() public view { + for (uint256 i = 0; i < superRootProof.outputRoots.length; i++) { + uint256 chainId = superRootProof.outputRoots[i].chainId; + assertEq(gameProxy.rootClaimByChainId(chainId).raw(), superRootProof.outputRoots[i].root); + } + } + + /// @notice Tests that requesting the root claim for an unknown chain ID reverts. 
+ function test_rootClaimForOutputRoot_unknownChainId_reverts() public { + uint256 invalidChainId = 9999; + vm.expectRevert(UnknownChainId.selector); + gameProxy.rootClaimByChainId(invalidChainId); + } +} diff --git a/packages/contracts-bedrock/test/dispute/lib/LibClock.t.sol b/packages/contracts-bedrock/test/dispute/lib/LibClock.t.sol index 4bf90c1dfc103..8ec76658b9b22 100644 --- a/packages/contracts-bedrock/test/dispute/lib/LibClock.t.sol +++ b/packages/contracts-bedrock/test/dispute/lib/LibClock.t.sol @@ -1,7 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Libraries import { LibClock } from "src/dispute/lib/LibUDT.sol"; import "src/dispute/lib/Types.sol"; diff --git a/packages/contracts-bedrock/test/dispute/lib/LibGameArgs.t.sol b/packages/contracts-bedrock/test/dispute/lib/LibGameArgs.t.sol index 6fd4c21e22a98..1eee52105da47 100644 --- a/packages/contracts-bedrock/test/dispute/lib/LibGameArgs.t.sol +++ b/packages/contracts-bedrock/test/dispute/lib/LibGameArgs.t.sol @@ -1,7 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Libraries import { LibGameArgs } from "src/dispute/lib/LibGameArgs.sol"; import { InvalidGameArgsLength } from "src/dispute/lib/Errors.sol"; diff --git a/packages/contracts-bedrock/test/dispute/lib/LibGameId.t.sol b/packages/contracts-bedrock/test/dispute/lib/LibGameId.t.sol index b1878178c1aef..d8ea4729d6cdb 100644 --- a/packages/contracts-bedrock/test/dispute/lib/LibGameId.t.sol +++ b/packages/contracts-bedrock/test/dispute/lib/LibGameId.t.sol @@ -1,8 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +// Libraries import "src/dispute/lib/Types.sol"; /// @title LibGameId_Pack_Test 
diff --git a/packages/contracts-bedrock/test/dispute/lib/LibPosition.t.sol b/packages/contracts-bedrock/test/dispute/lib/LibPosition.t.sol index 8d392fa49c55a..d38223ff0d0c7 100644 --- a/packages/contracts-bedrock/test/dispute/lib/LibPosition.t.sol +++ b/packages/contracts-bedrock/test/dispute/lib/LibPosition.t.sol @@ -1,7 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Libraries import { LibPosition } from "src/dispute/lib/LibPosition.sol"; import "src/dispute/lib/Types.sol"; diff --git a/packages/contracts-bedrock/test/dispute/zk/OptimisticZkGame.t.sol b/packages/contracts-bedrock/test/dispute/zk/OptimisticZkGame.t.sol new file mode 100644 index 0000000000000..fff896b9872e7 --- /dev/null +++ b/packages/contracts-bedrock/test/dispute/zk/OptimisticZkGame.t.sol @@ -0,0 +1,732 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Testing +import { DisputeGameFactory_TestInit } from "test/dispute/DisputeGameFactory.t.sol"; + +// Libraries +import { Claim, Duration, GameStatus, GameType, Timestamp } from "src/dispute/lib/Types.sol"; +import { + BadAuth, + IncorrectBondAmount, + UnexpectedRootClaim, + NoCreditToClaim, + GameNotFinalized, + ParentGameNotResolved, + InvalidParentGame, + ClaimAlreadyChallenged, + GameOver, + GameNotOver, + IncorrectDisputeGameFactory +} from "src/dispute/lib/Errors.sol"; +import { GameTypes } from "src/dispute/lib/Types.sol"; + +// Contracts +import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; +import { OptimisticZkGame } from "src/dispute/zk/OptimisticZkGame.sol"; +import { AccessManager } from "src/dispute/zk/AccessManager.sol"; + +// Interfaces +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { Proxy } from "src/universal/Proxy.sol"; + +/// @title OptimisticZkGame_TestInit +/// @notice Base test contract with shared setup for OptimisticZkGame tests. 
+abstract contract OptimisticZkGame_TestInit is DisputeGameFactory_TestInit { + // Events + event Challenged(address indexed challenger); + event Proved(address indexed prover); + event Resolved(GameStatus indexed status); + + OptimisticZkGame gameImpl; + OptimisticZkGame parentGame; + OptimisticZkGame game; + AccessManager accessManager; + + address proposer = address(0x123); + address challenger = address(0x456); + address prover = address(0x789); + + // Fixed parameters. + GameType gameType = GameTypes.OPTIMISTIC_ZK_GAME_TYPE; + Duration maxChallengeDuration = Duration.wrap(12 hours); + Duration maxProveDuration = Duration.wrap(3 days); + Claim rootClaim = Claim.wrap(keccak256("rootClaim")); + + // Sequence number offsets from anchor state (for parent and child games). + uint256 parentSequenceOffset = 1000; + uint256 childSequenceOffset = 2000; + + // Game indices are set dynamically in setUp (on fork, existing games already exist) + uint32 parentGameIndex; + uint32 childGameIndex; + + // Offsets from child sequence number for grandchild games. + uint256 grandchildOffset1 = 1000; + uint256 grandchildOffset2 = 2000; + uint256 grandchildOffset3 = 3000; + uint256 grandchildOffset4 = 8000; + + // Actual sequence numbers (set in setUp based on anchor state) + uint256 anchorL2SequenceNumber; + uint256 parentL2SequenceNumber; + uint256 childL2SequenceNumber; + + // For a new parent game that we manipulate separately in some tests. 
+ OptimisticZkGame separateParentGame; + + function setUp() public virtual override { + super.setUp(); + skipIfForkTest("Skip not supported yet"); + + // Get anchor state to calculate valid sequence numbers + (, anchorL2SequenceNumber) = anchorStateRegistry.getAnchorRoot(); + parentL2SequenceNumber = anchorL2SequenceNumber + parentSequenceOffset; + childL2SequenceNumber = anchorL2SequenceNumber + childSequenceOffset; + + // Setup game implementation using shared helper + address impl; + (impl, accessManager,) = setupOptimisticZkGame( + OptimisticZkGameParams({ + maxChallengeDuration: maxChallengeDuration, + maxProveDuration: maxProveDuration, + proposer: proposer, + challenger: challenger, + rollupConfigHash: bytes32(0), + aggregationVkey: bytes32(0), + rangeVkeyCommitment: bytes32(0), + challengerBond: 1 ether + }) + ); + gameImpl = OptimisticZkGame(impl); + + // Create the first (parent) game – it uses uint32.max as parent index. + vm.startPrank(proposer); + vm.deal(proposer, 2 ether); + + // Warp time forward to ensure the parent game is created after the respectedGameTypeUpdatedAt timestamp. + vm.warp(block.timestamp + 1000); + + // Create parent game (uses uint32.max to indicate first game in chain). + parentGame = OptimisticZkGame( + address( + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("genesis")), + abi.encodePacked(parentL2SequenceNumber, type(uint32).max) + ) + ) + ); + + // Record actual index of parent game (on fork, existing games already occupy indices 0, 1, ...) + parentGameIndex = uint32(disputeGameFactory.gameCount() - 1); + + // We want the parent game to finalize. We'll skip its challenge period. 
+ (,,,,, Timestamp parentGameDeadline) = parentGame.claimData(); + vm.warp(parentGameDeadline.raw() + 1 seconds); + parentGame.resolve(); + + uint256 finalityDelay = anchorStateRegistry.disputeGameFinalityDelaySeconds(); + vm.warp(parentGame.resolvedAt().raw() + finalityDelay + 1 seconds); + parentGame.claimCredit(proposer); + + // Create the child game referencing actual parent game index. + game = OptimisticZkGame( + address( + disputeGameFactory.create{ value: 1 ether }( + gameType, rootClaim, abi.encodePacked(childL2SequenceNumber, parentGameIndex) + ) + ) + ); + + // Record actual index of child game. + childGameIndex = uint32(disputeGameFactory.gameCount() - 1); + + vm.stopPrank(); + } +} + +/// @title OptimisticZkGame_Initialize_Test +/// @notice Tests for initialization of OptimisticZkGame. +contract OptimisticZkGame_Initialize_Test is OptimisticZkGame_TestInit { + function test_initialize_succeeds() public view { + // Test that the factory is correctly initialized. + assertEq(address(disputeGameFactory.owner()), address(this)); + assertEq(address(disputeGameFactory.gameImpls(gameType)), address(gameImpl)); + // We expect games including parent and child (indices may vary on fork). + assertEq(disputeGameFactory.gameCount(), childGameIndex + 1); + + // Check that our child game matches the game at childGameIndex. + (,, IDisputeGame proxy_) = disputeGameFactory.gameAtIndex(childGameIndex); + assertEq(address(game), address(proxy_)); + + // Check the child game fields. + assertEq(game.gameType().raw(), gameType.raw()); + assertEq(game.rootClaim().raw(), rootClaim.raw()); + assertEq(game.maxChallengeDuration().raw(), maxChallengeDuration.raw()); + assertEq(game.maxProveDuration().raw(), maxProveDuration.raw()); + assertEq(address(game.disputeGameFactory()), address(disputeGameFactory)); + assertEq(game.l2SequenceNumber(), childL2SequenceNumber); + + // The parent's sequence number (startingBlockNumber() returns l2SequenceNumber). 
+ assertEq(game.startingBlockNumber(), parentL2SequenceNumber); + + // The parent's root was keccak256("genesis"). + assertEq(game.startingRootHash().raw(), keccak256("genesis")); + + assertEq(address(game).balance, 1 ether); + + // Check the claimData. + ( + uint32 parentIndex_, + address counteredBy_, + address prover_, + Claim claim_, + OptimisticZkGame.ProposalStatus status_, + Timestamp deadline_ + ) = game.claimData(); + + assertEq(parentIndex_, parentGameIndex); + assertEq(counteredBy_, address(0)); + assertEq(game.gameCreator(), proposer); + assertEq(prover_, address(0)); + assertEq(claim_.raw(), rootClaim.raw()); + + // Initially, the status is Unchallenged. + assertEq(uint8(status_), uint8(OptimisticZkGame.ProposalStatus.Unchallenged)); + + // The child's initial deadline is block.timestamp + maxChallengeDuration. + uint256 currentTime = block.timestamp; + uint256 expectedDeadline = currentTime + maxChallengeDuration.raw(); + assertEq(deadline_.raw(), expectedDeadline); + } + + function test_initialize_blockNumberTooSmall_reverts() public { + // Try to create a child game that references a block number smaller than parent's. + vm.startPrank(proposer); + vm.deal(proposer, 1 ether); + + // We expect revert because l2BlockNumber (1) < parent's block number + vm.expectRevert( + abi.encodeWithSelector( + UnexpectedRootClaim.selector, + Claim.wrap(keccak256("rootClaim")) // The rootClaim we pass. + ) + ); + + disputeGameFactory.create{ value: 1 ether }( + gameType, + rootClaim, + abi.encodePacked(uint256(1), parentGameIndex) // L2 block is smaller than parent's block. + ); + vm.stopPrank(); + } + + function test_initialize_parentBlacklisted_reverts() public { + // Blacklist the game on the anchor state registry (which is what's actually used for validation). 
+ vm.prank(superchainConfig.guardian()); + anchorStateRegistry.blacklistDisputeGame(IDisputeGame(address(game))); + + vm.startPrank(proposer); + vm.deal(proposer, 1 ether); + vm.expectRevert(InvalidParentGame.selector); + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("blacklisted-parent-game")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset1, childGameIndex) + ); + vm.stopPrank(); + } + + function test_initialize_parentNotRespected_reverts() public { + // Create a new game which will be the parent. + vm.startPrank(proposer); + vm.deal(proposer, 1 ether); + OptimisticZkGame parentNotRespected = OptimisticZkGame( + address( + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("not-respected-parent-game")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset1, childGameIndex) + ) + ) + ); + uint32 parentNotRespectedIndex = uint32(disputeGameFactory.gameCount() - 1); + vm.stopPrank(); + + // Blacklist the parent game to make it invalid. + vm.prank(superchainConfig.guardian()); + anchorStateRegistry.blacklistDisputeGame(IDisputeGame(address(parentNotRespected))); + + // Try to create a game with a parent game that is not valid. 
+ vm.startPrank(proposer); + vm.deal(proposer, 1 ether); + vm.expectRevert(InvalidParentGame.selector); + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("child-with-not-respected-parent")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset2, parentNotRespectedIndex) + ); + vm.stopPrank(); + } + + function test_initialize_noPermission_reverts() public { + address maliciousProposer = address(0x1234); + + vm.startPrank(maliciousProposer); + vm.deal(maliciousProposer, 1 ether); + + vm.expectRevert(BadAuth.selector); + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("new-claim")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset1, childGameIndex) + ); + + vm.stopPrank(); + } + + function test_initialize_wrongFactory_reverts() public { + // Deploy the implementation contract for new DisputeGameFactory. + DisputeGameFactory newFactoryImpl = new DisputeGameFactory(); + + // Deploy a proxy pointing to the new factory implementation. + Proxy newFactoryProxyContract = new Proxy(address(this)); + newFactoryProxyContract.upgradeTo(address(newFactoryImpl)); + + // Cast the proxy to the DisputeGameFactory interface and initialize it. + DisputeGameFactory newFactory = DisputeGameFactory(address(newFactoryProxyContract)); + newFactory.initialize(address(this)); + + // Set the implementation with the same implementation as the old disputeGameFactory. + newFactory.setImplementation(gameType, IDisputeGame(address(gameImpl))); + newFactory.setInitBond(gameType, 1 ether); + + vm.startPrank(proposer); + vm.deal(proposer, 1 ether); + + vm.expectRevert(IncorrectDisputeGameFactory.selector); + newFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("new-claim")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset1, childGameIndex) + ); + + vm.stopPrank(); + } +} + +/// @title OptimisticZkGame_Resolve_Test +/// @notice Tests for resolve functionality of OptimisticZkGame. 
+contract OptimisticZkGame_Resolve_Test is OptimisticZkGame_TestInit { + function test_resolve_unchallenged_succeeds() public { + assertEq(uint8(game.status()), uint8(GameStatus.IN_PROGRESS)); + + // Should revert if we try to resolve before deadline. + vm.expectRevert(GameNotOver.selector); + game.resolve(); + + // Warp forward past the challenge deadline. + (,,,,, Timestamp deadline) = game.claimData(); + vm.warp(deadline.raw() + 1); + + // Expect the Resolved event. + vm.expectEmit(true, false, false, false, address(game)); + emit Resolved(GameStatus.DEFENDER_WINS); + + // Now we can resolve successfully. + game.resolve(); + + // Proposer gets the bond back. + vm.warp(game.resolvedAt().raw() + anchorStateRegistry.disputeGameFinalityDelaySeconds() + 1 seconds); + game.claimCredit(proposer); + + // Check final state + assertEq(uint8(game.status()), uint8(GameStatus.DEFENDER_WINS)); + // The contract should have paid back the proposer. + assertEq(address(game).balance, 0); + // Proposer posted 1 ether, so they get it back. + assertEq(proposer.balance, 2 ether); + assertEq(challenger.balance, 0); + } + + function test_resolve_unchallengedWithProof_succeeds() public { + assertEq(uint8(game.status()), uint8(GameStatus.IN_PROGRESS)); + + // Should revert if we try to resolve before the first challenge deadline. + vm.expectRevert(GameNotOver.selector); + game.resolve(); + + // Prover proves the claim while unchallenged. + vm.startPrank(prover); + game.prove(bytes("")); + vm.stopPrank(); + + // Now the proposal is UnchallengedAndValidProofProvided; we can resolve immediately. + game.resolve(); + + // Prover does not get any credit. + vm.warp(game.resolvedAt().raw() + anchorStateRegistry.disputeGameFinalityDelaySeconds() + 1 seconds); + vm.expectRevert(NoCreditToClaim.selector); + game.claimCredit(prover); + + // Proposer gets the bond back. + game.claimCredit(proposer); + + // Final status: DEFENDER_WINS. 
+ assertEq(uint8(game.status()), uint8(GameStatus.DEFENDER_WINS)); + assertEq(address(game).balance, 0); + + // Proposer gets their 1 ether back. + assertEq(proposer.balance, 2 ether); + // Prover does NOT get the reward because no challenger posted a bond. + assertEq(prover.balance, 0 ether); + assertEq(challenger.balance, 0); + } + + function test_resolve_challengedWithProof_succeeds() public { + assertEq(uint8(game.status()), uint8(GameStatus.IN_PROGRESS)); + assertEq(address(game).balance, 1 ether); + + // Try to resolve too early. + vm.expectRevert(GameNotOver.selector); + game.resolve(); + + // Challenger posts the bond incorrectly. + vm.startPrank(challenger); + vm.deal(challenger, 1 ether); + + // Must pay exactly the required bond. + vm.expectRevert(IncorrectBondAmount.selector); + game.challenge{ value: 0.5 ether }(); + + // Correctly challenge the game. + game.challenge{ value: 1 ether }(); + vm.stopPrank(); + + // Now the contract holds 2 ether total. + assertEq(address(game).balance, 2 ether); + + // Confirm the proposal is in Challenged state. + (, address counteredBy_,,, OptimisticZkGame.ProposalStatus challStatus,) = game.claimData(); + assertEq(counteredBy_, challenger); + assertEq(uint8(challStatus), uint8(OptimisticZkGame.ProposalStatus.Challenged)); + + // Prover proves the claim in time. + vm.startPrank(prover); + game.prove(bytes("")); + vm.stopPrank(); + + // Confirm the proposal is now ChallengedAndValidProofProvided. + (,,,, challStatus,) = game.claimData(); + assertEq(uint8(challStatus), uint8(OptimisticZkGame.ProposalStatus.ChallengedAndValidProofProvided)); + assertEq(uint8(game.status()), uint8(GameStatus.IN_PROGRESS)); + + // Resolve the game. + game.resolve(); + + // Prover gets the proof reward. + vm.warp(game.resolvedAt().raw() + anchorStateRegistry.disputeGameFinalityDelaySeconds() + 1 seconds); + game.claimCredit(prover); + + // Proposer gets the bond back. 
+ game.claimCredit(proposer); + + assertEq(uint8(game.status()), uint8(GameStatus.DEFENDER_WINS)); + assertEq(address(game).balance, 0); + + // Final balances: + // - The proposer recovers their 1 ether stake. + // - The prover gets 1 ether reward. + // - The challenger gets nothing. + assertEq(proposer.balance, 2 ether); + assertEq(prover.balance, 1 ether); + assertEq(challenger.balance, 0); + } + + function test_resolve_challengedNoProof_succeeds() public { + // Challenge the game. + vm.startPrank(challenger); + vm.deal(challenger, 2 ether); + game.challenge{ value: 1 ether }(); + vm.stopPrank(); + + // The contract now has 2 ether total. + assertEq(address(game).balance, 2 ether); + + // We must wait for the prove deadline to pass. + (,,,,, Timestamp deadline) = game.claimData(); + vm.warp(deadline.raw() + 1); + + // Now we can resolve, resulting in CHALLENGER_WINS. + game.resolve(); + + // Challenger gets the bond back and wins proposer's bond. + vm.warp(game.resolvedAt().raw() + anchorStateRegistry.disputeGameFinalityDelaySeconds() + 1 seconds); + game.claimCredit(challenger); + + assertEq(uint8(game.status()), uint8(GameStatus.CHALLENGER_WINS)); + + // The challenger receives the entire 3 ether. + assertEq(challenger.balance, 3 ether); // started with 2, spent 1, got 2 from the game. + + // The proposer loses their 1 ether stake. + assertEq(proposer.balance, 1 ether); // started with 2, lost 1. + // The contract balance is zero. + assertEq(address(game).balance, 0); + } + + function test_resolve_parentGameInProgress_reverts() public { + vm.startPrank(proposer); + + // Create a new game referencing the child game as parent. + OptimisticZkGame childGame = OptimisticZkGame( + address( + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("new-claim")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset1, childGameIndex) + ) + ) + ); + + vm.stopPrank(); + + // The parent game is still in progress, not resolved. 
+ // So, if we try to resolve the childGame, it should revert with ParentGameNotResolved. + vm.expectRevert(ParentGameNotResolved.selector); + childGame.resolve(); + } + + function test_resolve_parentGameInvalid_succeeds() public { + // 1) Now create a child game referencing that losing parent at index 1. + vm.startPrank(proposer); + OptimisticZkGame childGame = OptimisticZkGame( + address( + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("child-of-loser")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset4, childGameIndex) + ) + ) + ); + vm.stopPrank(); + + // 2) Challenge the parent game so that it ends up CHALLENGER_WINS when proof is not provided within the prove + // deadline. + vm.startPrank(challenger); + vm.deal(challenger, 2 ether); + game.challenge{ value: 1 ether }(); + vm.stopPrank(); + + // 3) Warp past the prove deadline. + (,,,,, Timestamp gameDeadline) = game.claimData(); + vm.warp(gameDeadline.raw() + 1); + + // 4) The game resolves as CHALLENGER_WINS. + game.resolve(); + + // Challenger gets the bond back and wins proposer's bond. + vm.warp(game.resolvedAt().raw() + anchorStateRegistry.disputeGameFinalityDelaySeconds() + 1 seconds); + game.claimCredit(challenger); + + assertEq(uint8(game.status()), uint8(GameStatus.CHALLENGER_WINS)); + + // 5) If we try to resolve the child game, it should be resolved as CHALLENGER_WINS + // because parent's claim is invalid. + // The child's bond is lost since there is no challenger for the child game. + childGame.resolve(); + + // Challenger hasn't challenged the child game, so it gets nothing. 
+ vm.warp(childGame.resolvedAt().raw() + anchorStateRegistry.disputeGameFinalityDelaySeconds() + 1 seconds); + + vm.expectRevert(NoCreditToClaim.selector); + childGame.claimCredit(challenger); + + assertEq(uint8(childGame.status()), uint8(GameStatus.CHALLENGER_WINS)); + + assertEq(address(childGame).balance, 1 ether); + assertEq(address(challenger).balance, 3 ether); + assertEq(address(proposer).balance, 0 ether); + } +} + +/// @title OptimisticZkGame_Challenge_Test +/// @notice Tests for challenge functionality of OptimisticZkGame. +contract OptimisticZkGame_Challenge_Test is OptimisticZkGame_TestInit { + function test_challenge_alreadyChallenged_reverts() public { + // Initially unchallenged. + (, address counteredBy_,,, OptimisticZkGame.ProposalStatus status_,) = game.claimData(); + assertEq(counteredBy_, address(0)); + assertEq(uint8(status_), uint8(OptimisticZkGame.ProposalStatus.Unchallenged)); + + // The first challenge is valid. + vm.startPrank(challenger); + vm.deal(challenger, 2 ether); + game.challenge{ value: 1 ether }(); + + // A second challenge from any party should revert because the proposal is no longer "Unchallenged". + vm.expectRevert(ClaimAlreadyChallenged.selector); + game.challenge{ value: 1 ether }(); + vm.stopPrank(); + } + + function test_challenge_noPermission_reverts() public { + address maliciousChallenger = address(0x1234); + + vm.startPrank(maliciousChallenger); + vm.deal(maliciousChallenger, 1 ether); + + vm.expectRevert(BadAuth.selector); + game.challenge{ value: 1 ether }(); + + vm.stopPrank(); + } +} + +/// @title OptimisticZkGame_Prove_Test +/// @notice Tests for prove functionality of OptimisticZkGame. +contract OptimisticZkGame_Prove_Test is OptimisticZkGame_TestInit { + function test_prove_afterDeadline_reverts() public { + // Challenge first. + vm.startPrank(challenger); + vm.deal(challenger, 1 ether); + game.challenge{ value: 1 ether }(); + vm.stopPrank(); + + // Move time forward beyond the prove period. 
+ (,,,,, Timestamp deadline) = game.claimData(); + vm.warp(deadline.raw() + 1); + + vm.startPrank(prover); + // Attempting to prove after the deadline is exceeded. + vm.expectRevert(GameOver.selector); + game.prove(bytes("")); + vm.stopPrank(); + } + + function test_prove_alreadyProved_reverts() public { + vm.startPrank(prover); + game.prove(bytes("")); + vm.expectRevert(GameOver.selector); + game.prove(bytes("")); + vm.stopPrank(); + } +} + +/// @title OptimisticZkGame_ClaimCredit_Test +/// @notice Tests for claimCredit functionality of OptimisticZkGame. +contract OptimisticZkGame_ClaimCredit_Test is OptimisticZkGame_TestInit { + function test_claimCredit_notFinalized_reverts() public { + (,,,,, Timestamp deadline) = game.claimData(); + vm.warp(deadline.raw() + 1); + game.resolve(); + + vm.expectRevert(GameNotFinalized.selector); + game.claimCredit(proposer); + } +} + +/// @title OptimisticZkGame_CloseGame_Test +/// @notice Tests for closeGame functionality of OptimisticZkGame. +contract OptimisticZkGame_CloseGame_Test is OptimisticZkGame_TestInit { + function test_closeGame_notResolved_reverts() public { + vm.expectRevert(GameNotFinalized.selector); + game.closeGame(); + } + + function test_closeGame_updatesAnchorGame_succeeds() public { + (,,,,, Timestamp deadline) = game.claimData(); + vm.warp(deadline.raw() + 1); + game.resolve(); + + vm.warp(game.resolvedAt().raw() + anchorStateRegistry.disputeGameFinalityDelaySeconds() + 1 seconds); + game.closeGame(); + + assertEq(address(anchorStateRegistry.anchorGame()), address(game)); + } +} + +/// @title OptimisticZkGame_AccessManager_Test +/// @notice Tests for AccessManager permissionless fallback functionality. 
+contract OptimisticZkGame_AccessManager_Test is OptimisticZkGame_TestInit { + function test_accessManager_permissionlessAfterTimeout_succeeds() public { + // Initially, unauthorized user should not be allowed + address unauthorizedUser = address(0x9999); + + // Try to create a game as unauthorized user - should fail + vm.prank(unauthorizedUser); + vm.deal(unauthorizedUser, 1 ether); + vm.expectRevert(BadAuth.selector); + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("new-claim-1")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset1, childGameIndex) + ); + + vm.prank(proposer); + vm.deal(proposer, 1 ether); + disputeGameFactory.create{ value: 1 ether }( + gameType, Claim.wrap(keccak256("new-claim-2")), abi.encodePacked(childL2SequenceNumber, parentGameIndex) + ); + + // Warp time forward past the timeout + vm.warp(block.timestamp + 2 weeks + 1); + + // Now unauthorized user should be allowed due to timeout + vm.prank(unauthorizedUser); + vm.deal(unauthorizedUser, 1 ether); + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("new-claim-3")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset2, childGameIndex) + ); + + // After the new game, timeout resets - unauthorized user should not be allowed immediately + vm.prank(unauthorizedUser); + vm.deal(unauthorizedUser, 1 ether); + vm.expectRevert(BadAuth.selector); + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("new-claim-4")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset3, childGameIndex) + ); + } + + function test_accessManager_permissionlessNoGamesAfterTimeout_succeeds() public { + // Initially, unauthorized user should not be allowed + address unauthorizedUser = address(0x9999); + + // Try to create a game as unauthorized user - should fail + vm.prank(unauthorizedUser); + vm.deal(unauthorizedUser, 1 ether); + vm.expectRevert(BadAuth.selector); + disputeGameFactory.create{ value: 1 
ether }( + gameType, + Claim.wrap(keccak256("new-claim-1")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset1, childGameIndex) + ); + + // Warp time forward past the timeout + vm.warp(block.timestamp + 2 weeks + 1 hours); + + // Now unauthorized user should be allowed due to timeout + vm.prank(unauthorizedUser); + vm.deal(unauthorizedUser, 1 ether); + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("new-claim-3")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset2, childGameIndex) + ); + + // After the new game, timeout resets - unauthorized user should not be allowed immediately + vm.prank(unauthorizedUser); + vm.deal(unauthorizedUser, 1 ether); + vm.expectRevert(BadAuth.selector); + disputeGameFactory.create{ value: 1 ether }( + gameType, + Claim.wrap(keccak256("new-claim-4")), + abi.encodePacked(childL2SequenceNumber + grandchildOffset3, childGameIndex) + ); + } +} diff --git a/packages/contracts-bedrock/test/dispute/zk/mocks/SP1MockVerifier.sol b/packages/contracts-bedrock/test/dispute/zk/mocks/SP1MockVerifier.sol new file mode 100644 index 0000000000000..974aa1c8b312d --- /dev/null +++ b/packages/contracts-bedrock/test/dispute/zk/mocks/SP1MockVerifier.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +import { ISP1Verifier } from "src/dispute/zk/ISP1Verifier.sol"; + +contract SP1MockVerifier is ISP1Verifier { + /// @notice Verifies a mock proof with given public values and vkey. + /// @param proofBytes The proof of the program execution the SP1 zkVM encoded as bytes. 
+ function verifyProof(bytes32, bytes calldata, bytes calldata proofBytes) external pure { + assert(proofBytes.length == 0); + } +} diff --git a/packages/contracts-bedrock/test/governance/MintManager.t.sol b/packages/contracts-bedrock/test/governance/MintManager.t.sol index b7e1f1a8b141d..62c0de45da2d3 100644 --- a/packages/contracts-bedrock/test/governance/MintManager.t.sol +++ b/packages/contracts-bedrock/test/governance/MintManager.t.sol @@ -55,14 +55,14 @@ contract MintManager_Constructor_Test is MintManager_TestInit { /// @title MintManager_Mint_Test /// @notice Tests the `mint` function of the `MintManager` contract. contract MintManager_Mint_Test is MintManager_TestInit { - /// @notice Tests that the mint function properly mints tokens when called by the owner. - function test_mint_fromOwner_succeeds() external { - // Mint once. + /// @notice Tests that the first mint can be any amount since no cap applies. + function testFuzz_mint_firstMint_succeeds(uint256 _amount) external { + _amount = bound(_amount, 0, type(uint192).max); + vm.prank(owner); - manager.mint(owner, 100); + manager.mint(owner, _amount); - // Token balance increases. - assertEq(gov.balanceOf(owner), 100); + assertEq(gov.balanceOf(owner), _amount); } /// @notice Tests that the mint function reverts when called by a non-owner. @@ -73,23 +73,23 @@ contract MintManager_Mint_Test is MintManager_TestInit { manager.mint(owner, 100); } - /// @notice Tests that the mint function properly mints tokens when called by the owner a - /// second time after the mint period has elapsed. - function test_mint_afterPeriodElapsed_succeeds() external { - // Mint once. + /// @notice Tests that subsequent mints succeed when within cap after period elapsed. 
+ function testFuzz_mint_afterPeriodElapsed_succeeds(uint256 _initialAmount, uint256 _secondAmount) external { + _initialAmount = bound(_initialAmount, 1, type(uint192).max); + vm.prank(owner); - manager.mint(owner, 100); + manager.mint(owner, _initialAmount); - // Token balance increases. - assertEq(gov.balanceOf(owner), 100); + assertEq(gov.balanceOf(owner), _initialAmount); + + uint256 maxMint = (_initialAmount * manager.MINT_CAP()) / manager.DENOMINATOR(); + _secondAmount = bound(_secondAmount, 0, maxMint); - // Mint again after period elapsed (2% max). vm.warp(block.timestamp + manager.MINT_PERIOD() + 1); vm.prank(owner); - manager.mint(owner, 2); + manager.mint(owner, _secondAmount); - // Token balance increases. - assertEq(gov.balanceOf(owner), 102); + assertEq(gov.balanceOf(owner), _initialAmount + _secondAmount); } /// @notice Tests that the mint function always reverts when called before the mint period has @@ -134,14 +134,14 @@ contract MintManager_Mint_Test is MintManager_TestInit { /// @title MintManager_Upgrade_Test /// @notice Tests the `upgrade` function of the `MintManager` contract. contract MintManager_Upgrade_Test is MintManager_TestInit { - /// @notice Tests that the owner can upgrade the mint manager. - function test_upgrade_fromOwner_succeeds() external { - // Upgrade to new manager. + /// @notice Tests that the owner can upgrade to any non-zero address. + function testFuzz_upgrade_fromOwner_succeeds(address _newManager) external { + vm.assume(_newManager != address(0)); + vm.prank(owner); - manager.upgrade(rando); + manager.upgrade(_newManager); - // New manager is rando. - assertEq(gov.owner(), rando); + assertEq(gov.owner(), _newManager); } /// @notice Tests that the upgrade function reverts when called by a non-owner. 
diff --git a/packages/contracts-bedrock/test/integration/EventLogger.t.sol b/packages/contracts-bedrock/test/integration/EventLogger.t.sol index 51ecccc1a4e90..46265da67b6fe 100644 --- a/packages/contracts-bedrock/test/integration/EventLogger.t.sol +++ b/packages/contracts-bedrock/test/integration/EventLogger.t.sol @@ -1,17 +1,20 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { Test } from "forge-std/Test.sol"; - -import { Identifier as IfaceIdentifier } from "interfaces/L2/ICrossL2Inbox.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +import { VmSafe } from "forge-std/Vm.sol"; +// Contracts import { EventLogger } from "../../src/integration/EventLogger.sol"; +import { CrossL2Inbox } from "src/L2/CrossL2Inbox.sol"; +// Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; +// Interfaces +import { Identifier as IfaceIdentifier } from "interfaces/L2/ICrossL2Inbox.sol"; import { ICrossL2Inbox, Identifier as ImplIdentifier } from "interfaces/L2/ICrossL2Inbox.sol"; -import { VmSafe } from "forge-std/Vm.sol"; -import { CrossL2Inbox } from "src/L2/CrossL2Inbox.sol"; /// @title EventLogger_TestInit /// @notice Reusable test initialization for `EventLogger` tests. 
diff --git a/packages/contracts-bedrock/test/invariants/CustomGasToken.t.sol b/packages/contracts-bedrock/test/invariants/CustomGasToken.t.sol index 764a3a0236ccc..334d8d44a5672 100644 --- a/packages/contracts-bedrock/test/invariants/CustomGasToken.t.sol +++ b/packages/contracts-bedrock/test/invariants/CustomGasToken.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Testing -import { StdUtils } from "forge-std/Test.sol"; +import { StdUtils } from "forge-std/StdUtils.sol"; import { Vm } from "forge-std/Vm.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; diff --git a/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol b/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol index ec046d1ebb551..02a4e63e31695 100644 --- a/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol +++ b/packages/contracts-bedrock/test/invariants/ETHLiquidity.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Testing -import { StdUtils } from "forge-std/Test.sol"; +import { StdUtils } from "forge-std/StdUtils.sol"; import { Vm } from "forge-std/Vm.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; diff --git a/packages/contracts-bedrock/test/invariants/InvariantTest.sol b/packages/contracts-bedrock/test/invariants/InvariantTest.sol index eea6c158b3577..c4720ec15d16d 100644 --- a/packages/contracts-bedrock/test/invariants/InvariantTest.sol +++ b/packages/contracts-bedrock/test/invariants/InvariantTest.sol @@ -1,9 +1,12 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Testing +import { Test } from "test/setup/Test.sol"; import { FFIInterface } from "test/setup/FFIInterface.sol"; + +// Scripts import { Deploy } from "scripts/deploy/Deploy.s.sol"; -import { Test } from "forge-std/Test.sol"; /// @title InvariantTest /// @dev An extension to `Test` that sets up excluded contracts for invariant testing. 
diff --git a/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol index f826e0262914a..297f9a0e47a87 100644 --- a/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Testing -import { StdUtils } from "forge-std/Test.sol"; +import { StdUtils } from "forge-std/StdUtils.sol"; import { Vm } from "forge-std/Vm.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; diff --git a/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20/OptimismSuperchainERC20.t.sol b/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20/OptimismSuperchainERC20.t.sol index d53d2fd29f93c..0d01a97a4838b 100644 --- a/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20/OptimismSuperchainERC20.t.sol +++ b/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20/OptimismSuperchainERC20.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; diff --git a/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol b/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol index 8dcd0da95a59e..b10a10471ba26 100644 --- a/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/invariants/SystemConfig.t.sol @@ -1,11 +1,18 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Scripts +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +// Libraries +import { Constants } from 
"src/libraries/Constants.sol"; + +// Interfaces import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; import { IProxy } from "interfaces/universal/IProxy.sol"; -import { Constants } from "src/libraries/Constants.sol"; -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; contract SystemConfig_GasLimitBoundaries_Invariant is Test { diff --git a/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol b/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol index 764e9ef88975a..b148bcf3e8c19 100644 --- a/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol +++ b/packages/contracts-bedrock/test/legacy/DeployerWhitelist.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; // Target contract import { IDeployerWhitelist } from "interfaces/legacy/IDeployerWhitelist.sol"; diff --git a/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol b/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol index d536ad006e149..2d215dd284c81 100644 --- a/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol +++ b/packages/contracts-bedrock/test/legacy/L1BlockNumber.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Testing -import { Test } from "forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; diff --git a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol index aebc7e3072547..c81a0c888b316 100644 --- a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol +++ b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: UNLICENSED pragma solidity 0.8.15; -// Forge -import { Test } from 
"forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import { VmSafe } from "forge-std/Vm.sol"; // Scripts diff --git a/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol b/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol index 9df666980ae82..9c4a7bd6a6705 100644 --- a/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol +++ b/packages/contracts-bedrock/test/legacy/ResolvedDelegateProxy.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; // Target contract dependencies import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; diff --git a/packages/contracts-bedrock/test/libraries/Blueprint.t.sol b/packages/contracts-bedrock/test/libraries/Blueprint.t.sol index 433271f4cf34a..ae03959dcb327 100644 --- a/packages/contracts-bedrock/test/libraries/Blueprint.t.sol +++ b/packages/contracts-bedrock/test/libraries/Blueprint.t.sol @@ -1,7 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Libraries import { Blueprint } from "src/libraries/Blueprint.sol"; /// @dev Used to test that constructor args are appended properly when deploying from a blueprint. 
diff --git a/packages/contracts-bedrock/test/libraries/Bytes.t.sol b/packages/contracts-bedrock/test/libraries/Bytes.t.sol index aa919f19a0b43..3cd5b0e3551b6 100644 --- a/packages/contracts-bedrock/test/libraries/Bytes.t.sol +++ b/packages/contracts-bedrock/test/libraries/Bytes.t.sol @@ -1,10 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; -// Target contract +// Libraries import { Bytes } from "src/libraries/Bytes.sol"; contract Bytes_Harness { diff --git a/packages/contracts-bedrock/test/libraries/Constants.t.sol b/packages/contracts-bedrock/test/libraries/Constants.t.sol index b83029e822675..60bb52bd87809 100644 --- a/packages/contracts-bedrock/test/libraries/Constants.t.sol +++ b/packages/contracts-bedrock/test/libraries/Constants.t.sol @@ -1,8 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Libraries import { Constants } from "src/libraries/Constants.sol"; + +// Interfaces import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; /// @title Constants_Test diff --git a/packages/contracts-bedrock/test/libraries/DeployUtils.t.sol b/packages/contracts-bedrock/test/libraries/DeployUtils.t.sol index 1b1af20cf53e6..4bee5fe190846 100644 --- a/packages/contracts-bedrock/test/libraries/DeployUtils.t.sol +++ b/packages/contracts-bedrock/test/libraries/DeployUtils.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Forge -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; // Libraries import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; diff --git a/packages/contracts-bedrock/test/libraries/DevFeatures.t.sol b/packages/contracts-bedrock/test/libraries/DevFeatures.t.sol index f4851b4afeb49..31cf7d6a62a04 
100644 --- a/packages/contracts-bedrock/test/libraries/DevFeatures.t.sol +++ b/packages/contracts-bedrock/test/libraries/DevFeatures.t.sol @@ -1,10 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; -// Target contract +// Libraries import { DevFeatures } from "src/libraries/DevFeatures.sol"; contract DevFeatures_isDevFeatureEnabled_Test is Test { diff --git a/packages/contracts-bedrock/test/libraries/EOA.t.sol b/packages/contracts-bedrock/test/libraries/EOA.t.sol index 69a894c90a6de..cc6712eb28e23 100644 --- a/packages/contracts-bedrock/test/libraries/EOA.t.sol +++ b/packages/contracts-bedrock/test/libraries/EOA.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Forge -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; // Libraries import { EOA } from "src/libraries/EOA.sol"; diff --git a/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol b/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol index c20ade9a9d632..66f4d8adea0e3 100644 --- a/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol +++ b/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol @@ -1,10 +1,12 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Target contract +// Testing +import { Test } from "test/setup/Test.sol"; + +// Libraries import { GasPayingToken } from "src/libraries/GasPayingToken.sol"; import { Constants } from "src/libraries/Constants.sol"; -import { Test } from "forge-std/Test.sol"; import { LibString } from "@solady/utils/LibString.sol"; contract GasPayingToken_Harness { diff --git a/packages/contracts-bedrock/test/libraries/Preinstalls.t.sol b/packages/contracts-bedrock/test/libraries/Preinstalls.t.sol index 2a2592dbe05a3..2aef18af1ed61 100644 --- a/packages/contracts-bedrock/test/libraries/Preinstalls.t.sol +++ 
b/packages/contracts-bedrock/test/libraries/Preinstalls.t.sol @@ -6,9 +6,28 @@ import { Preinstalls } from "src/libraries/Preinstalls.sol"; import { Bytes } from "src/libraries/Bytes.sol"; import { IEIP712 } from "interfaces/universal/IEIP712.sol"; +/// @title Preinstalls_Harness +/// @notice Harness contract to expose internal Preinstalls library functions for testing. +contract Preinstalls_Harness { + function getDeployedCode(address _addr, uint256 _chainID) external pure returns (bytes memory) { + return Preinstalls.getDeployedCode(_addr, _chainID); + } + + function getName(address _addr) external pure returns (string memory) { + return Preinstalls.getName(_addr); + } +} + /// @title Preinstalls_TestInit /// @notice Reusable test initialization for `Preinstalls` tests. abstract contract Preinstalls_TestInit is CommonTest { + Preinstalls_Harness internal harness; + + function setUp() public virtual override { + super.setUp(); + harness = new Preinstalls_Harness(); + } + function assertPreinstall(address _addr, bytes memory _code) internal view { assertNotEq(_code.length, 0, "must have code"); assertNotEq(_addr.code.length, 0, "deployed preinstall account must have code"); @@ -46,6 +65,26 @@ contract Preinstalls_GetPermit2Code_Test is Preinstalls_TestInit { } } +/// @title Preinstalls_GetDeployedCode_Test +/// @notice Tests the `getDeployedCode` function of the `Preinstalls` library. +contract Preinstalls_GetDeployedCode_Test is Preinstalls_TestInit { + /// @notice Tests that getDeployedCode reverts for an unknown address. + function test_getDeployedCode_unknownAddress_reverts() external { + vm.expectRevert(bytes("Preinstalls: unknown preinstall")); + harness.getDeployedCode(address(0x1234), block.chainid); + } +} + +/// @title Preinstalls_GetName_Test +/// @notice Tests the `getName` function of the `Preinstalls` library. +contract Preinstalls_GetName_Test is Preinstalls_TestInit { + /// @notice Tests that getName reverts for an unknown address. 
+ function test_getName_unknownAddress_reverts() external { + vm.expectRevert(bytes("Preinstalls: unnamed preinstall")); + harness.getName(address(0x1234)); + } +} + /// @title Preinstalls_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `Preinstalls` contract /// or are testing multiple functions at once. diff --git a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol index 084c601cbcb3e..3f6a3bdf25571 100644 --- a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol +++ b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol @@ -1,14 +1,14 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Scripts -import { Config } from "scripts/libraries/Config.sol"; - -// Forge -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import { VmSafe } from "forge-std/Vm.sol"; import { StdCheatsSafe } from "forge-std/StdCheats.sol"; +// Scripts +import { Config } from "scripts/libraries/Config.sol"; + // Libraries import { LibString } from "@solady/utils/LibString.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; diff --git a/packages/contracts-bedrock/test/libraries/SemverComp.t.sol b/packages/contracts-bedrock/test/libraries/SemverComp.t.sol index b182af378ffeb..45f2ace041c66 100644 --- a/packages/contracts-bedrock/test/libraries/SemverComp.t.sol +++ b/packages/contracts-bedrock/test/libraries/SemverComp.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Forge -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; // Libraries import { JSONParserLib } from "solady/src/utils/JSONParserLib.sol"; diff --git a/packages/contracts-bedrock/test/libraries/StaticConfig.t.sol b/packages/contracts-bedrock/test/libraries/StaticConfig.t.sol index 00c8fe209571e..47f51380b4766 100644 --- 
a/packages/contracts-bedrock/test/libraries/StaticConfig.t.sol +++ b/packages/contracts-bedrock/test/libraries/StaticConfig.t.sol @@ -1,11 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import { FFIInterface } from "test/setup/FFIInterface.sol"; -// Target contract +// Libraries import { StaticConfig } from "src/libraries/StaticConfig.sol"; /// @title StaticConfig_TestInit diff --git a/packages/contracts-bedrock/test/libraries/Storage.t.sol b/packages/contracts-bedrock/test/libraries/Storage.t.sol index 5a801c24afc31..7ead653247725 100644 --- a/packages/contracts-bedrock/test/libraries/Storage.t.sol +++ b/packages/contracts-bedrock/test/libraries/Storage.t.sol @@ -1,9 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Target contract +// Testing +import { Test } from "test/setup/Test.sol"; + +// Contracts import { StorageSetter } from "src/universal/StorageSetter.sol"; -import { Test } from "forge-std/Test.sol"; /// @title Storage_TestInit /// @notice Reusable test initialization for `Storage` tests. 
diff --git a/packages/contracts-bedrock/test/libraries/TransientContext.t.sol b/packages/contracts-bedrock/test/libraries/TransientContext.t.sol index 99d6264e2ba73..9d3511cf023bd 100644 --- a/packages/contracts-bedrock/test/libraries/TransientContext.t.sol +++ b/packages/contracts-bedrock/test/libraries/TransientContext.t.sol @@ -1,10 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.24; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; -// Target contractS +// Target contracts import { TransientContext } from "src/libraries/TransientContext.sol"; import { TransientReentrancyAware } from "src/libraries/TransientContext.sol"; diff --git a/packages/contracts-bedrock/test/libraries/rlp/RLPReader.t.sol b/packages/contracts-bedrock/test/libraries/rlp/RLPReader.t.sol index e4ba42414a96f..7e6f752950c59 100644 --- a/packages/contracts-bedrock/test/libraries/rlp/RLPReader.t.sol +++ b/packages/contracts-bedrock/test/libraries/rlp/RLPReader.t.sol @@ -1,8 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { stdError } from "forge-std/Test.sol"; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +import { stdError } from "forge-std/StdError.sol"; + +// Libraries import { RLPReader } from "src/libraries/rlp/RLPReader.sol"; import "src/libraries/rlp/RLPErrors.sol"; diff --git a/packages/contracts-bedrock/test/libraries/rlp/RLPWriter.t.sol b/packages/contracts-bedrock/test/libraries/rlp/RLPWriter.t.sol index 7ff838845be2b..a143d4e68d4d0 100644 --- a/packages/contracts-bedrock/test/libraries/rlp/RLPWriter.t.sol +++ b/packages/contracts-bedrock/test/libraries/rlp/RLPWriter.t.sol @@ -1,8 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Libraries import { RLPWriter } from "src/libraries/rlp/RLPWriter.sol"; -import { Test } from "forge-std/Test.sol"; 
/// @title RLPWriter_writeString_Test /// @notice Tests the `writeString` function of the `RLPWriter` library. diff --git a/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol b/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol index d746933990d52..51e0cb1ae9a47 100644 --- a/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol +++ b/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol @@ -1,10 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +import { FFIInterface } from "test/setup/FFIInterface.sol"; + +// Libraries import { MerkleTrie } from "src/libraries/trie/MerkleTrie.sol"; import { RLPReader } from "src/libraries/rlp/RLPReader.sol"; -import { FFIInterface } from "test/setup/FFIInterface.sol"; import "src/libraries/rlp/RLPErrors.sol"; contract MerkleTrie_Harness { diff --git a/packages/contracts-bedrock/test/opcm/DeployAlphabetVM.t.sol b/packages/contracts-bedrock/test/opcm/DeployAlphabetVM.t.sol index 64f9f380dc45c..bc87618e0ee39 100644 --- a/packages/contracts-bedrock/test/opcm/DeployAlphabetVM.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployAlphabetVM.t.sol @@ -1,13 +1,15 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Scripts +import { DeployAlphabetVM } from "scripts/deploy/DeployAlphabetVM.s.sol"; // Interfaces import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; -import { DeployAlphabetVM } from "scripts/deploy/DeployAlphabetVM.s.sol"; - contract DeployAlphabetVM2_Test is Test { DeployAlphabetVM deployAlphanetVM; diff --git a/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol b/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol index 312e82d57e1e8..39828f6691d1a 100644 --- a/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol +++ 
b/packages/contracts-bedrock/test/opcm/DeployAltDA.t.sol @@ -1,13 +1,17 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +// Scripts import { DeployAltDA } from "scripts/deploy/DeployAltDA.s.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +// Interfaces import { IDataAvailabilityChallenge } from "interfaces/L1/IDataAvailabilityChallenge.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; import { IProxy } from "interfaces/universal/IProxy.sol"; -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract DeployAltDA_Test is Test { DeployAltDA deployAltDA; diff --git a/packages/contracts-bedrock/test/opcm/DeployAsterisc.t.sol b/packages/contracts-bedrock/test/opcm/DeployAsterisc.t.sol deleted file mode 100644 index f2bd971e646cb..0000000000000 --- a/packages/contracts-bedrock/test/opcm/DeployAsterisc.t.sol +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { Test } from "forge-std/Test.sol"; - -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; - -// Interfaces -import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; - -import { DeployAsterisc } from "scripts/deploy/DeployAsterisc.s.sol"; - -contract DeployAsterisc_Test is Test { - DeployAsterisc deployAsterisc; - - // Define default input variables for testing. 
- IPreimageOracle defaultPreimageOracle = IPreimageOracle(makeAddr("preimageOracle")); - - function setUp() public { - deployAsterisc = new DeployAsterisc(); - } - - function test_run_succeeds(DeployAsterisc.Input memory _input) public { - vm.assume(address(_input.preimageOracle) != address(0)); - - DeployAsterisc.Output memory output = deployAsterisc.run(_input); - - DeployUtils.assertValidContractAddress(address(output.asteriscSingleton)); - assertEq(address(output.asteriscSingleton.oracle()), address(_input.preimageOracle), "100"); - } - - function test_run_nullInput_reverts() public { - DeployAsterisc.Input memory input; - - input = defaultInput(); - input.preimageOracle = IPreimageOracle(address(0)); - vm.expectRevert("DeployAsterisc: preimageOracle not set"); - deployAsterisc.run(input); - } - - function defaultInput() internal view returns (DeployAsterisc.Input memory input_) { - input_ = DeployAsterisc.Input(defaultPreimageOracle); - } -} diff --git a/packages/contracts-bedrock/test/opcm/DeployDisputeGame.t.sol b/packages/contracts-bedrock/test/opcm/DeployDisputeGame.t.sol index 6931a6e13c22d..f71c30d88dd18 100644 --- a/packages/contracts-bedrock/test/opcm/DeployDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployDisputeGame.t.sol @@ -1,7 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Libraries +import { LibPosition } from "src/dispute/lib/LibPosition.sol"; +import { GameType } from "src/dispute/lib/Types.sol"; +import { LibString } from "@solady/utils/LibString.sol"; // Interfaces import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; @@ -14,6 +20,7 @@ import { LibPosition } from "src/dispute/lib/LibPosition.sol"; import { GameType } from "src/dispute/lib/Types.sol"; import { LibString } from "@solady/utils/LibString.sol"; +// Contracts import { PreimageOracle } from 
"src/cannon/PreimageOracle.sol"; import { DeployDisputeGame } from "scripts/deploy/DeployDisputeGame.s.sol"; diff --git a/packages/contracts-bedrock/test/opcm/DeployFeesDepositor.t.sol b/packages/contracts-bedrock/test/opcm/DeployFeesDepositor.t.sol index e8d934427be72..d0a277673fa41 100644 --- a/packages/contracts-bedrock/test/opcm/DeployFeesDepositor.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployFeesDepositor.t.sol @@ -1,18 +1,22 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; + +// Scripts +import { DeployFeesDepositor } from "scripts/deploy/DeployFeesDepositor.s.sol"; + +// Contracts +import { FeesDepositor } from "src/L1/FeesDepositor.sol"; +import { Proxy } from "src/universal/Proxy.sol"; // Interfaces import { IFeesDepositor } from "interfaces/L1/IFeesDepositor.sol"; import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; import { IProxy } from "interfaces/universal/IProxy.sol"; -import { DeployFeesDepositor } from "scripts/deploy/DeployFeesDepositor.s.sol"; -import { FeesDepositor } from "src/L1/FeesDepositor.sol"; -import { Proxy } from "src/universal/Proxy.sol"; -import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; - /// @title DeployFeesDepositor_Test /// @notice This test is used to test the DeployFeesDepositor script. 
contract DeployFeesDepositor_Test is Test { diff --git a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol index 3136aaeb0c21a..4b58a66ede8e9 100644 --- a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol @@ -2,7 +2,8 @@ pragma solidity 0.8.15; // Testing -import { Test, stdStorage, StdStorage } from "forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; +import { stdStorage, StdStorage } from "forge-std/StdStorage.sol"; import "../setup/FeatureFlags.sol"; // Libraries @@ -245,6 +246,9 @@ contract DeployImplementations_Test is Test, FeatureFlags { DeployImplementations.Output memory output = deployImplementations.run(input); + // Check which OPCM version is deployed + bool opcmV2Enabled = DevFeatures.isDevFeatureEnabled(_devFeatureBitmap, DevFeatures.OPCM_V2); + // Basic assertions assertNotEq(address(output.anchorStateRegistryImpl), address(0), "100"); assertNotEq(address(output.delayedWETHImpl), address(0), "200"); @@ -254,10 +258,26 @@ contract DeployImplementations_Test is Test, FeatureFlags { assertNotEq(address(output.l1ERC721BridgeImpl), address(0), "500"); assertNotEq(address(output.l1StandardBridgeImpl), address(0), "600"); assertNotEq(address(output.mipsSingleton), address(0), "700"); - assertNotEq(address(output.opcm), address(0), "800"); - assertNotEq(address(output.opcmContractsContainer), address(0), "900"); - assertNotEq(address(output.opcmDeployer), address(0), "1000"); - assertNotEq(address(output.opcmGameTypeAdder), address(0), "1100"); + + // OPCM version-specific assertions + if (opcmV2Enabled) { + assertNotEq(address(output.opcmV2), address(0), "800"); + assertNotEq(address(output.opcmContainer), address(0), "900"); + assertNotEq(address(output.opcmStandardValidator), address(0), "1000"); + // V1 contracts should be null when V2 is enabled + 
assertEq(address(output.opcm), address(0), "800-v1"); + assertEq(address(output.opcmContractsContainer), address(0), "900-v1"); + assertEq(address(output.opcmDeployer), address(0), "1000-v1"); + assertEq(address(output.opcmGameTypeAdder), address(0), "1100-v1"); + } else { + assertNotEq(address(output.opcm), address(0), "800"); + assertNotEq(address(output.opcmContractsContainer), address(0), "900"); + assertNotEq(address(output.opcmDeployer), address(0), "1000"); + assertNotEq(address(output.opcmGameTypeAdder), address(0), "1100"); + // V2 contracts should be null when V1 is enabled + assertEq(address(output.opcmV2), address(0), "800-v2"); + assertEq(address(output.opcmContainer), address(0), "900-v2"); + } assertNotEq(address(output.faultDisputeGameV2Impl), address(0), "V2 should be deployed when enabled"); assertNotEq(address(output.permissionedDisputeGameV2Impl), address(0), "V2 should be deployed when enabled"); @@ -351,10 +371,26 @@ contract DeployImplementations_Test is Test, FeatureFlags { assertNotEq(address(output.l1ERC721BridgeImpl).code, empty, "1700"); assertNotEq(address(output.l1StandardBridgeImpl).code, empty, "1800"); assertNotEq(address(output.mipsSingleton).code, empty, "1900"); - assertNotEq(address(output.opcm).code, empty, "2000"); - assertNotEq(address(output.opcmContractsContainer).code, empty, "2100"); - assertNotEq(address(output.opcmDeployer).code, empty, "2200"); - assertNotEq(address(output.opcmGameTypeAdder).code, empty, "2300"); + + // OPCM version-specific code assertions + if (opcmV2Enabled) { + assertNotEq(address(output.opcmV2).code, empty, "2000"); + assertNotEq(address(output.opcmContainer).code, empty, "2100"); + assertNotEq(address(output.opcmStandardValidator).code, empty, "2200"); + // V1 contracts should be empty when V2 is enabled + assertEq(address(output.opcm).code, empty, "2000-v1"); + assertEq(address(output.opcmContractsContainer).code, empty, "2100-v1"); + assertEq(address(output.opcmDeployer).code, empty, 
"2200-v1"); + assertEq(address(output.opcmGameTypeAdder).code, empty, "2300-v1"); + } else { + assertNotEq(address(output.opcm).code, empty, "2000"); + assertNotEq(address(output.opcmContractsContainer).code, empty, "2100"); + assertNotEq(address(output.opcmDeployer).code, empty, "2200"); + assertNotEq(address(output.opcmGameTypeAdder).code, empty, "2300"); + // V2 contracts should be empty when V1 is enabled + assertEq(address(output.opcmV2).code, empty, "2000-v2"); + assertEq(address(output.opcmContainer).code, empty, "2100-v2"); + } assertNotEq(address(output.faultDisputeGameV2Impl).code, empty, "V2 FDG should have code when enabled"); assertNotEq(address(output.permissionedDisputeGameV2Impl).code, empty, "V2 PDG should have code when enabled"); diff --git a/packages/contracts-bedrock/test/opcm/DeployMIPS2.t.sol b/packages/contracts-bedrock/test/opcm/DeployMIPS2.t.sol index 71f6de0f2ddc0..e1a61fd2b2a33 100644 --- a/packages/contracts-bedrock/test/opcm/DeployMIPS2.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployMIPS2.t.sol @@ -1,15 +1,19 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; - -// Interfaces -import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +// Scripts import { DeployMIPS2 } from "scripts/deploy/DeployMIPS2.s.sol"; -import { MIPS64 } from "src/cannon/MIPS64.sol"; import { StandardConstants } from "scripts/deploy/StandardConstants.sol"; +// Contracts +import { MIPS64 } from "src/cannon/MIPS64.sol"; + +// Interfaces +import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; + contract DeployMIPS2_Test is Test { DeployMIPS2 deployMIPS; diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index 53defc933f152..d5a3b3d7507e4 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ 
b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -1,19 +1,26 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import { FeatureFlags } from "test/setup/FeatureFlags.sol"; -import { Features } from "src/libraries/Features.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; +// Scripts import { DeploySuperchain } from "scripts/deploy/DeploySuperchain.s.sol"; import { DeployImplementations } from "scripts/deploy/DeployImplementations.s.sol"; import { DeployOPChain } from "scripts/deploy/DeployOPChain.s.sol"; import { StandardConstants } from "scripts/deploy/StandardConstants.sol"; import { Types } from "scripts/libraries/Types.sol"; +// Libraries +import { Features } from "src/libraries/Features.sol"; + +// Interfaces import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; import { Claim, Duration, GameType, GameTypes } from "src/dispute/lib/Types.sol"; import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; contract DeployOPChain_TestBase is Test, FeatureFlags { DeploySuperchain deploySuperchain; @@ -57,7 +64,8 @@ contract DeployOPChain_TestBase is Test, FeatureFlags { uint256 disputeSplitDepth = 30; Duration disputeClockExtension = Duration.wrap(3 hours); Duration disputeMaxClockDuration = Duration.wrap(3.5 days); - IOPContractsManager opcm; + address opcmAddr; + ISuperchainConfig superchainConfig; bool useCustomGasToken = false; event Deployed(uint256 indexed l2ChainId, address indexed deployer, bytes deployOutput); @@ -101,8 +109,13 @@ contract DeployOPChain_TestBase is Test, FeatureFlags { devFeatureBitmap: devFeatureBitmap }) ); - opcm = dio.opcm; - vm.label(address(opcm), "opcm"); + // Select OPCM v1 or v2 based on feature flag + opcmAddr = isDevFeatureEnabled(DevFeatures.OPCM_V2) ? 
address(dio.opcmV2) : address(dio.opcm); + vm.label(address(dio.opcm), "opcm"); + vm.label(address(dio.opcmV2), "opcmV2"); + + // Set superchainConfig from deployment + superchainConfig = dso.superchainConfigProxy; // 3) Build DeployOPChainInput struct deployOPChainInput = Types.DeployOPChainInput({ @@ -115,7 +128,7 @@ contract DeployOPChain_TestBase is Test, FeatureFlags { basefeeScalar: basefeeScalar, blobBaseFeeScalar: blobBaseFeeScalar, l2ChainId: l2ChainId, - opcm: address(opcm), + opcm: opcmAddr, saltMixer: saltMixer, gasLimit: gasLimit, disputeGameType: disputeGameType, @@ -127,6 +140,7 @@ contract DeployOPChain_TestBase is Test, FeatureFlags { allowCustomDisputeParameters: false, operatorFeeScalar: 0, operatorFeeConstant: 0, + superchainConfig: superchainConfig, useCustomGasToken: useCustomGasToken }); } @@ -162,6 +176,34 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { useCustomGasToken, "SystemConfig CUSTOM_GAS_TOKEN feature" ); + + // Verify superchainConfig is set correctly + assertEq( + address(doo.systemConfigProxy.superchainConfig()), + address(deployOPChainInput.superchainConfig), + "superchainConfig mismatch" + ); + + // OPCM v2 specific assertions + if (isDevFeatureEnabled(DevFeatures.OPCM_V2)) { + // PERMISSIONED_CANNON must always be enabled with 0.08 ether init bond + assertEq(doo.disputeGameFactoryProxy.initBonds(GameTypes.PERMISSIONED_CANNON), 0.08 ether); + assertNotEq(address(doo.disputeGameFactoryProxy.gameImpls(GameTypes.PERMISSIONED_CANNON)), address(0)); + + // CANNON is only enabled if it's the starting game type + bool cannonEnabled = deployOPChainInput.disputeGameType.raw() == GameTypes.CANNON.raw(); + assertEq(doo.disputeGameFactoryProxy.initBonds(GameTypes.CANNON), cannonEnabled ? 
0.08 ether : 0); + if (cannonEnabled) { + assertNotEq(address(doo.disputeGameFactoryProxy.gameImpls(GameTypes.CANNON)), address(0)); + } + + // CANNON_KONA is only enabled if it's the starting game type + bool cannonKonaEnabled = deployOPChainInput.disputeGameType.raw() == GameTypes.CANNON_KONA.raw(); + assertEq(doo.disputeGameFactoryProxy.initBonds(GameTypes.CANNON_KONA), cannonKonaEnabled ? 0.08 ether : 0); + if (cannonKonaEnabled) { + assertNotEq(address(doo.disputeGameFactoryProxy.gameImpls(GameTypes.CANNON_KONA)), address(0)); + } + } } function testFuzz_run_memory_succeeds(bytes32 _seed) public { @@ -178,27 +220,33 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { DeployOPChain.Output memory doo = deployOPChain.run(deployOPChainInput); - // Verify that the initial bonds are zero. - assertEq(doo.disputeGameFactoryProxy.initBonds(GameTypes.CANNON), 0, "2700"); - assertEq(doo.disputeGameFactoryProxy.initBonds(GameTypes.PERMISSIONED_CANNON), 0, "2800"); + // Skip init bond checks for OPCM v2 (bonds are set during deployment, not zero) + if (!isDevFeatureEnabled(DevFeatures.OPCM_V2)) { + // Verify that the initial bonds are zero for OPCM v1. 
+ assertEq(doo.disputeGameFactoryProxy.initBonds(GameTypes.CANNON), 0, "2700"); + assertEq(doo.disputeGameFactoryProxy.initBonds(GameTypes.PERMISSIONED_CANNON), 0, "2800"); + } // Check dispute game deployments // Validate permissionedDisputeGame (PDG) address - IOPContractsManager.Implementations memory impls = opcm.implementations(); + IOPContractsManager.Implementations memory impls = IOPContractsManager(opcmAddr).implementations(); address expectedPDGAddress = impls.permissionedDisputeGameV2Impl; address actualPDGAddress = address(doo.disputeGameFactoryProxy.gameImpls(GameTypes.PERMISSIONED_CANNON)); assertNotEq(actualPDGAddress, address(0), "PDG address should be non-zero"); assertEq(actualPDGAddress, expectedPDGAddress, "PDG address should match expected address"); - // Check PDG getters - IPermissionedDisputeGame pdg = IPermissionedDisputeGame(actualPDGAddress); - bytes32 expectedPrestate = bytes32(0); - assertEq(pdg.l2BlockNumber(), 0, "3000"); - assertEq(Claim.unwrap(pdg.absolutePrestate()), expectedPrestate, "3100"); - assertEq(Duration.unwrap(pdg.clockExtension()), 10800, "3200"); - assertEq(Duration.unwrap(pdg.maxClockDuration()), 302400, "3300"); - assertEq(pdg.splitDepth(), 30, "3400"); - assertEq(pdg.maxGameDepth(), 73, "3500"); + // Skip PDG getter checks for OPCM v2 (game args are passed at creation time) + if (!isDevFeatureEnabled(DevFeatures.OPCM_V2)) { + // Check PDG getters + IPermissionedDisputeGame pdg = IPermissionedDisputeGame(actualPDGAddress); + bytes32 expectedPrestate = bytes32(0); + assertEq(pdg.l2BlockNumber(), 0, "3000"); + assertEq(Claim.unwrap(pdg.absolutePrestate()), expectedPrestate, "3100"); + assertEq(Duration.unwrap(pdg.clockExtension()), 10800, "3200"); + assertEq(Duration.unwrap(pdg.maxClockDuration()), 302400, "3300"); + assertEq(pdg.splitDepth(), 30, "3400"); + assertEq(pdg.maxGameDepth(), 73, "3500"); + } // Verify custom gas token feature is set as seeded assertEq( diff --git 
a/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol b/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol index ae52362446be6..3c8983c7b633c 100644 --- a/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeploySuperchain.t.sol @@ -1,11 +1,17 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +// Scripts +import { DeploySuperchain } from "scripts/deploy/DeploySuperchain.s.sol"; + +// Contracts import { Proxy } from "src/universal/Proxy.sol"; + +// Interfaces import { ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; -import { DeploySuperchain } from "scripts/deploy/DeploySuperchain.s.sol"; contract DeploySuperchain_Test is Test { DeploySuperchain deploySuperchain; diff --git a/packages/contracts-bedrock/test/opcm/InteropMigration.t.sol b/packages/contracts-bedrock/test/opcm/InteropMigration.t.sol index 27c71d4329c94..a98f8731ed6ea 100644 --- a/packages/contracts-bedrock/test/opcm/InteropMigration.t.sol +++ b/packages/contracts-bedrock/test/opcm/InteropMigration.t.sol @@ -1,13 +1,19 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +// Scripts import { InteropMigrationInput, InteropMigration, InteropMigrationOutput } from "scripts/deploy/InteropMigration.s.sol"; + +// Libraries +import { Claim } from "src/dispute/lib/Types.sol"; + +// Interfaces import { IOPContractsManagerInteropMigrator, IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; import { IOptimismPortal2 as IOptimismPortal } from "interfaces/L1/IOptimismPortal2.sol"; import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; -import { Claim } from "src/dispute/lib/Types.sol"; contract InteropMigrationInput_Test is Test { InteropMigrationInput input; diff --git 
a/packages/contracts-bedrock/test/opcm/SetDisputeGameImpl.t.sol b/packages/contracts-bedrock/test/opcm/SetDisputeGameImpl.t.sol index ad266dccd79f7..d0a86a4c6a1a8 100644 --- a/packages/contracts-bedrock/test/opcm/SetDisputeGameImpl.t.sol +++ b/packages/contracts-bedrock/test/opcm/SetDisputeGameImpl.t.sol @@ -1,16 +1,25 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { Test } from "forge-std/Test.sol"; -import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; -import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; -import { GameType, Proposal, Hash } from "src/dispute/lib/Types.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Scripts import { SetDisputeGameImpl, SetDisputeGameImplInput } from "scripts/deploy/SetDisputeGameImpl.s.sol"; + +// Contracts import { DisputeGameFactory } from "src/dispute/DisputeGameFactory.sol"; import { Proxy } from "src/universal/Proxy.sol"; import { SuperchainConfig } from "src/L1/SuperchainConfig.sol"; import { AnchorStateRegistry } from "src/dispute/AnchorStateRegistry.sol"; import { SystemConfig } from "src/L1/SystemConfig.sol"; + +// Libraries +import { GameType, Proposal, Hash } from "src/dispute/lib/Types.sol"; + +// Interfaces +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; diff --git a/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol b/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol index 6a662631580a5..da47f8b555cf5 100644 --- a/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol @@ -1,111 +1,161 @@ // SPDX-License-Identifier: MIT pragma 
solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; -import { Claim } from "src/dispute/lib/Types.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; -import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +// Scripts +import { UpgradeOPChain, UpgradeOPChainInput } from "scripts/deploy/UpgradeOPChain.s.sol"; +// Contracts import { OPContractsManager } from "src/L1/OPContractsManager.sol"; +import { OPContractsManagerV2 } from "src/L1/opcm/OPContractsManagerV2.sol"; -import { UpgradeOPChain, UpgradeOPChainInput } from "scripts/deploy/UpgradeOPChain.s.sol"; +// Libraries +import { Claim } from "src/dispute/lib/Types.sol"; +import { GameType } from "src/dispute/lib/LibUDT.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; + +// Interfaces +import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; + contract UpgradeOPChainInput_Test is Test { UpgradeOPChainInput input; + MockOPCMV1 _mockOPCM; function setUp() public { input = new UpgradeOPChainInput(); + _mockOPCM = new MockOPCMV1(); + input.set(input.opcm.selector, address(_mockOPCM)); } + /// @notice This test verifies that the UpgradeOPChain script correctly reverts when the upgrade input is not + /// completely set.
function test_getters_whenNotSet_reverts() public { + UpgradeOPChainInput freshInput = new UpgradeOPChainInput(); + vm.expectRevert("UpgradeOPCMInput: prank not set"); - input.prank(); + freshInput.prank(); vm.expectRevert("UpgradeOPCMInput: not set"); - input.opcm(); + freshInput.opcm(); vm.expectRevert("UpgradeOPCMInput: not set"); - input.opChainConfigs(); + freshInput.upgradeInput(); } - function test_setAddress_succeeds() public { - address mockPrank = makeAddr("prank"); - address mockOPCM = makeAddr("opcm"); - - // Create mock contract at OPCM address - vm.etch(mockOPCM, hex"01"); + /// @notice This test verifies that the UpgradeOPChain script correctly sets the upgrade input with + /// the address type. + function testFuzz_setAddress_succeeds(address mockPrank, address mockOPCM) public { + vm.assume(mockPrank != address(0)); + vm.assume(mockOPCM != address(0)); - input.set(input.prank.selector, mockPrank); - input.set(input.opcm.selector, mockOPCM); + UpgradeOPChainInput freshInput = new UpgradeOPChainInput(); + freshInput.set(freshInput.prank.selector, mockPrank); + freshInput.set(freshInput.opcm.selector, mockOPCM); - assertEq(input.prank(), mockPrank); - assertEq(address(input.opcm()), mockOPCM); + assertEq(freshInput.prank(), mockPrank); + assertEq(freshInput.opcm(), mockOPCM); } - function test_setOpChainConfigs_succeeds() public { + /// @notice This test verifies that the UpgradeOPChain script correctly sets the upgrade input with + /// the OPContractsManager.OpChainConfig[] type. 
+ function testFuzz_setOpChainConfigs_succeeds( + address systemConfig1, + address systemConfig2, + bytes32 prestate1, + bytes32 konaPrestate1, + bytes32 prestate2, + bytes32 konaPrestate2 + ) + public + { + // Assume non-zero addresses for system configs + vm.assume(systemConfig1 != address(0)); + vm.assume(systemConfig2 != address(0)); + // Assume not precompiles for system configs + assumeNotPrecompile(systemConfig1); + assumeNotPrecompile(systemConfig2); + // Ensure system configs don't collide with test contracts + vm.assume(systemConfig1 != address(input)); + vm.assume(systemConfig1 != address(_mockOPCM)); + vm.assume(systemConfig2 != address(input)); + vm.assume(systemConfig2 != address(_mockOPCM)); + // Create sample OpChainConfig array OPContractsManager.OpChainConfig[] memory configs = new OPContractsManager.OpChainConfig[](2); // Setup mock addresses and contracts for first config - address systemConfig1 = makeAddr("systemConfig1"); - address proxyAdmin1 = makeAddr("proxyAdmin1"); vm.etch(systemConfig1, hex"01"); - vm.etch(proxyAdmin1, hex"01"); configs[0] = OPContractsManager.OpChainConfig({ systemConfigProxy: ISystemConfig(systemConfig1), - cannonPrestate: Claim.wrap(bytes32(uint256(1))), - cannonKonaPrestate: Claim.wrap(bytes32(uint256(2))) + cannonPrestate: Claim.wrap(prestate1), + cannonKonaPrestate: Claim.wrap(konaPrestate1) }); // Setup mock addresses and contracts for second config - address systemConfig2 = makeAddr("systemConfig2"); - address proxyAdmin2 = makeAddr("proxyAdmin2"); vm.etch(systemConfig2, hex"01"); - vm.etch(proxyAdmin2, hex"01"); configs[1] = OPContractsManager.OpChainConfig({ systemConfigProxy: ISystemConfig(systemConfig2), - cannonPrestate: Claim.wrap(bytes32(uint256(2))), - cannonKonaPrestate: Claim.wrap(bytes32(uint256(3))) + cannonPrestate: Claim.wrap(prestate2), + cannonKonaPrestate: Claim.wrap(konaPrestate2) }); - input.set(input.opChainConfigs.selector, configs); + input.set(input.upgradeInput.selector, configs); - bytes 
memory storedConfigs = input.opChainConfigs(); + bytes memory storedConfigs = input.upgradeInput(); assertEq(storedConfigs, abi.encode(configs)); // Additional verification of stored claims if needed OPContractsManager.OpChainConfig[] memory decodedConfigs = abi.decode(storedConfigs, (OPContractsManager.OpChainConfig[])); - assertEq(Claim.unwrap(decodedConfigs[0].cannonPrestate), bytes32(uint256(1))); - assertEq(Claim.unwrap(decodedConfigs[1].cannonPrestate), bytes32(uint256(2))); + assertEq(Claim.unwrap(decodedConfigs[0].cannonPrestate), prestate1); + assertEq(Claim.unwrap(decodedConfigs[1].cannonPrestate), prestate2); + assertEq(Claim.unwrap(decodedConfigs[0].cannonKonaPrestate), konaPrestate1); + assertEq(Claim.unwrap(decodedConfigs[1].cannonKonaPrestate), konaPrestate2); } + /// @notice This test verifies that the UpgradeOPChain script correctly reverts when setting the upgrade input with + /// a zero address. function test_setAddress_withZeroAddress_reverts() public { + UpgradeOPChainInput freshInput = new UpgradeOPChainInput(); + vm.expectRevert("UpgradeOPCMInput: cannot set zero address"); - input.set(input.prank.selector, address(0)); + freshInput.set(freshInput.prank.selector, address(0)); vm.expectRevert("UpgradeOPCMInput: cannot set zero address"); - input.set(input.opcm.selector, address(0)); + freshInput.set(freshInput.opcm.selector, address(0)); } + /// @notice This test verifies that the UpgradeOPChain script correctly reverts when setting the upgrade input with + /// an empty array. 
function test_setOpChainConfigs_withEmptyArray_reverts() public { OPContractsManager.OpChainConfig[] memory emptyConfigs = new OPContractsManager.OpChainConfig[](0); vm.expectRevert("UpgradeOPCMInput: cannot set empty array"); - input.set(input.opChainConfigs.selector, emptyConfigs); + input.set(input.upgradeInput.selector, emptyConfigs); } - function test_set_withInvalidSelector_reverts() public { + /// @notice This test verifies that the UpgradeOPChain script correctly reverts when setting the upgrade input with + /// an invalid selector. + function testFuzz_set_withInvalidSelector_reverts(bytes4 invalidSelector, address testAddr) public { + // Assume the selector is not one of the valid selectors + vm.assume(invalidSelector != input.prank.selector); + vm.assume(invalidSelector != input.opcm.selector); + vm.assume(invalidSelector != input.upgradeInput.selector); + vm.assume(testAddr != address(0)); + vm.expectRevert("UpgradeOPCMInput: unknown selector"); - input.set(bytes4(0xdeadbeef), makeAddr("test")); + input.set(invalidSelector, testAddr); // Create a single config for testing invalid selector OPContractsManager.OpChainConfig[] memory configs = new OPContractsManager.OpChainConfig[](1); address mockSystemConfig = makeAddr("systemConfig"); - address mockProxyAdmin = makeAddr("proxyAdmin"); vm.etch(mockSystemConfig, hex"01"); - vm.etch(mockProxyAdmin, hex"01"); configs[0] = OPContractsManager.OpChainConfig({ systemConfigProxy: ISystemConfig(mockSystemConfig), @@ -114,15 +164,172 @@ contract UpgradeOPChainInput_Test is Test { }); vm.expectRevert("UpgradeOPCMInput: unknown selector"); - input.set(bytes4(0xdeadbeef), configs); + input.set(invalidSelector, configs); + } + + /// @notice This test verifies that the UpgradeOPChain script correctly reverts when setting the upgrade input with + /// OPCM v2 input when OPCM v1 is enabled. 
+ function testFuzz_setUpgradeInputV2_onV1OPCM_reverts( + address systemConfig, + bool enabled, + uint256 initBond, + uint32 gameType + ) + public + { + vm.assume(systemConfig != address(0)); + vm.assume(initBond > 0); + + // Try to set V2 input when V1 is enabled + IOPContractsManagerUtils.DisputeGameConfig[] memory disputeGameConfigs = + new IOPContractsManagerUtils.DisputeGameConfig[](1); + disputeGameConfigs[0] = IOPContractsManagerUtils.DisputeGameConfig({ + enabled: enabled, + initBond: initBond, + gameType: GameType.wrap(gameType), + gameArgs: abi.encode("test") + }); + + OPContractsManagerV2.UpgradeInput memory upgradeInput = OPContractsManagerV2.UpgradeInput({ + systemConfig: ISystemConfig(systemConfig), + disputeGameConfigs: disputeGameConfigs, + extraInstructions: new IOPContractsManagerUtils.ExtraInstruction[](0) + }); + + vm.expectRevert("UpgradeOPCMInput: cannot set OPCM v2 upgrade input when OPCM v1 is enabled"); + input.set(input.upgradeInput.selector, upgradeInput); + } +} + +contract UpgradeOPChainInput_TestV2 is Test { + UpgradeOPChainInput input; + MockOPCMV2 mockOPCM; + + function setUp() public { + input = new UpgradeOPChainInput(); + mockOPCM = new MockOPCMV2(); + input.set(input.opcm.selector, address(mockOPCM)); + } + + /// @notice Tests that the upgrade input can be set using the OPContractsManagerV2.UpgradeInput type. 
+ function testFuzz_setUpgradeInputV2_succeeds( + address systemConfig, + bool enabled, + uint256 initBond, + uint32 gameType, + bytes memory gameArgs, + string memory extraKey, + bytes memory extraData + ) + public + { + // Assume non-zero address for system config + vm.assume(systemConfig != address(0)); + vm.assume(initBond > 0); + + // Create sample UpgradeInputV2 + IOPContractsManagerUtils.DisputeGameConfig[] memory disputeGameConfigs = + new IOPContractsManagerUtils.DisputeGameConfig[](1); + disputeGameConfigs[0] = IOPContractsManagerUtils.DisputeGameConfig({ + enabled: enabled, + initBond: initBond, + gameType: GameType.wrap(gameType), + gameArgs: gameArgs + }); + + IOPContractsManagerUtils.ExtraInstruction[] memory extraInstructions = + new IOPContractsManagerUtils.ExtraInstruction[](1); + extraInstructions[0] = IOPContractsManagerUtils.ExtraInstruction({ key: extraKey, data: extraData }); + + OPContractsManagerV2.UpgradeInput memory upgradeInput = OPContractsManagerV2.UpgradeInput({ + systemConfig: ISystemConfig(systemConfig), + disputeGameConfigs: disputeGameConfigs, + extraInstructions: extraInstructions + }); + + input.set(input.upgradeInput.selector, upgradeInput); + + bytes memory storedUpgradeInput = input.upgradeInput(); + assertEq(storedUpgradeInput, abi.encode(upgradeInput)); + + // Additional verification of stored values if needed + OPContractsManagerV2.UpgradeInput memory decodedUpgradeInput = + abi.decode(storedUpgradeInput, (OPContractsManagerV2.UpgradeInput)); + // Check system config matches + assertEq(address(decodedUpgradeInput.systemConfig), address(upgradeInput.systemConfig)); + // Check dispute game configs match + assertEq(decodedUpgradeInput.disputeGameConfigs.length, disputeGameConfigs.length); + assertEq(decodedUpgradeInput.disputeGameConfigs[0].enabled, enabled); + assertEq(decodedUpgradeInput.disputeGameConfigs[0].initBond, initBond); + assertEq(GameType.unwrap(decodedUpgradeInput.disputeGameConfigs[0].gameType), gameType); + 
assertEq(keccak256(decodedUpgradeInput.disputeGameConfigs[0].gameArgs), keccak256(gameArgs)); + // Check extra instructions match + assertEq(decodedUpgradeInput.extraInstructions.length, extraInstructions.length); + assertEq(decodedUpgradeInput.extraInstructions[0].key, extraKey); + assertEq(keccak256(decodedUpgradeInput.extraInstructions[0].data), keccak256(extraData)); + } + + /// @notice This test verifies that the UpgradeOPChain script correctly reverts when setting the upgrade input with + /// a zero system config. + function testFuzz_setUpgradeInputV2_withZeroSystemConfig_reverts() public { + OPContractsManagerV2.UpgradeInput memory upgradeInput = OPContractsManagerV2.UpgradeInput({ + systemConfig: ISystemConfig(address(0)), + disputeGameConfigs: new IOPContractsManagerUtils.DisputeGameConfig[](1), + extraInstructions: new IOPContractsManagerUtils.ExtraInstruction[](0) + }); + + vm.expectRevert("UpgradeOPCMInput: cannot set zero address"); + input.set(input.upgradeInput.selector, upgradeInput); + } + + /// @notice This test verifies that the UpgradeOPChain script correctly reverts when setting the upgrade input with + /// an empty dispute game configs array. + function testFuzz_setUpgradeInputV2_withEmptyDisputeGameConfigs_reverts(address systemConfig) public { + vm.assume(systemConfig != address(0)); + + OPContractsManagerV2.UpgradeInput memory upgradeInput = OPContractsManagerV2.UpgradeInput({ + systemConfig: ISystemConfig(systemConfig), + disputeGameConfigs: new IOPContractsManagerUtils.DisputeGameConfig[](0), + extraInstructions: new IOPContractsManagerUtils.ExtraInstruction[](0) + }); + + vm.expectRevert("UpgradeOPCMInput: cannot set empty dispute game configs array"); + input.set(input.upgradeInput.selector, upgradeInput); + } + + /// @notice This test verifies that the UpgradeOPChain script correctly reverts when setting the upgrade input with + /// OPCM v1 input when OPCM v2 is enabled. 
+ function testFuzz_setUpgradeInputV1_onV2OPCM_reverts( + address systemConfigProxy, + bytes32 cannonPrestate, + bytes32 cannonKonaPrestate + ) + public + { + vm.assume(systemConfigProxy != address(0)); + + // Try to set V1 input when V2 is enabled + OPContractsManager.OpChainConfig[] memory configs = new OPContractsManager.OpChainConfig[](1); + configs[0] = OPContractsManager.OpChainConfig({ + systemConfigProxy: ISystemConfig(systemConfigProxy), + cannonPrestate: Claim.wrap(cannonPrestate), + cannonKonaPrestate: Claim.wrap(cannonKonaPrestate) + }); + + vm.expectRevert("UpgradeOPCMInput: cannot set OPCM v1 upgrade input when OPCM v2 is enabled"); + input.set(input.upgradeInput.selector, configs); } } -contract MockOPCM { +contract MockOPCMV1 { event UpgradeCalled( address indexed sysCfgProxy, bytes32 indexed absolutePrestate, bytes32 indexed cannonKonaPrestate ); + function isDevFeatureEnabled(bytes32 /* _feature */ ) public pure returns (bool) { + return false; + } + function upgrade(OPContractsManager.OpChainConfig[] memory _opChainConfigs) public { emit UpgradeCalled( address(_opChainConfigs[0].systemConfigProxy), @@ -132,8 +339,26 @@ contract MockOPCM { } } +contract MockOPCMV2 { + event UpgradeCalled( + address indexed systemConfig, + IOPContractsManagerUtils.DisputeGameConfig[] indexed disputeGameConfigs, + IOPContractsManagerUtils.ExtraInstruction[] indexed extraInstructions + ); + + function isDevFeatureEnabled(bytes32 _feature) public pure returns (bool) { + return _feature == DevFeatures.OPCM_V2; + } + + function upgrade(OPContractsManagerV2.UpgradeInput memory _upgradeInput) public { + emit UpgradeCalled( + address(_upgradeInput.systemConfig), _upgradeInput.disputeGameConfigs, _upgradeInput.extraInstructions + ); + } +} + contract UpgradeOPChain_Test is Test { - MockOPCM mockOPCM; + MockOPCMV1 mockOPCM; UpgradeOPChainInput uoci; OPContractsManager.OpChainConfig config; UpgradeOPChain upgradeOPChain; @@ -143,30 +368,106 @@ contract UpgradeOPChain_Test is 
Test { address indexed sysCfgProxy, bytes32 indexed absolutePrestate, bytes32 indexed cannonKonaPrestate ); - function setUp() public virtual { - mockOPCM = new MockOPCM(); + function setUp() public { + mockOPCM = new MockOPCMV1(); uoci = new UpgradeOPChainInput(); uoci.set(uoci.opcm.selector, address(mockOPCM)); + prank = makeAddr("prank"); + uoci.set(uoci.prank.selector, prank); + upgradeOPChain = new UpgradeOPChain(); + } + + /// @notice This test verifies that the UpgradeOPChain script correctly encodes and passes down the upgrade input + /// arguments to the OPCM contract's upgrade function. + /// @dev It does not test the actual upgrade functionality. + function testFuzz_upgrade_succeeds( + address systemConfigProxy, + bytes32 cannonPrestate, + bytes32 cannonKonaPrestate + ) + public + { + vm.assume(systemConfigProxy != address(0)); + config = OPContractsManager.OpChainConfig({ - systemConfigProxy: ISystemConfig(makeAddr("systemConfigProxy")), - cannonPrestate: Claim.wrap(keccak256("cannonPrestate")), - cannonKonaPrestate: Claim.wrap(keccak256("cannonKonaPrestate")) + systemConfigProxy: ISystemConfig(systemConfigProxy), + cannonPrestate: Claim.wrap(cannonPrestate), + cannonKonaPrestate: Claim.wrap(cannonKonaPrestate) }); OPContractsManager.OpChainConfig[] memory configs = new OPContractsManager.OpChainConfig[](1); configs[0] = config; - uoci.set(uoci.opChainConfigs.selector, configs); + uoci.set(uoci.upgradeInput.selector, configs); + + // UpgradeCalled should be emitted by the prank since it's a delegate call. 
+ vm.expectEmit(address(prank)); + emit UpgradeCalled( + address(config.systemConfigProxy), + Claim.unwrap(config.cannonPrestate), + Claim.unwrap(config.cannonKonaPrestate) + ); + upgradeOPChain.run(uoci); + } +} + +contract UpgradeOPChain_TestV2 is Test { + MockOPCMV2 mockOPCM; + UpgradeOPChainInput uoci; + UpgradeOPChain upgradeOPChain; + address prank; + + event UpgradeCalled( + address indexed systemConfig, + IOPContractsManagerUtils.DisputeGameConfig[] indexed disputeGameConfigs, + IOPContractsManagerUtils.ExtraInstruction[] indexed extraInstructions + ); + + function setUp() public { + mockOPCM = new MockOPCMV2(); + uoci = new UpgradeOPChainInput(); + uoci.set(uoci.opcm.selector, address(mockOPCM)); + prank = makeAddr("prank"); uoci.set(uoci.prank.selector, prank); upgradeOPChain = new UpgradeOPChain(); } - function test_upgrade_succeeds() public { + /// @notice This test verifies that the UpgradeOPChain script correctly encodes and passes down the upgrade input + /// arguments to the OPCM contract's upgrade function. + /// @dev It does not test the actual upgrade functionality. + function testFuzz_upgrade_succeeds( + address systemConfig, + bool enabled, + uint256 initBond, + uint32 gameType, + bytes memory gameArgs + ) + public + { + vm.assume(systemConfig != address(0)); + + // NOTE: Setting the upgrade input here to avoid `Copying of type struct + // IOPContractsManagerUtils.DisputeGameConfig memory[] memory to storage + // not yet supported.` error. 
+ IOPContractsManagerUtils.DisputeGameConfig[] memory disputeGameConfigs = + new IOPContractsManagerUtils.DisputeGameConfig[](1); + disputeGameConfigs[0] = IOPContractsManagerUtils.DisputeGameConfig({ + enabled: enabled, + initBond: initBond, + gameType: GameType.wrap(gameType), + gameArgs: gameArgs + }); + + OPContractsManagerV2.UpgradeInput memory upgradeInput = OPContractsManagerV2.UpgradeInput({ + systemConfig: ISystemConfig(systemConfig), + disputeGameConfigs: disputeGameConfigs, + extraInstructions: new IOPContractsManagerUtils.ExtraInstruction[](0) + }); + uoci.set(uoci.upgradeInput.selector, upgradeInput); + // UpgradeCalled should be emitted by the prank since it's a delegate call. vm.expectEmit(address(prank)); emit UpgradeCalled( - address(config.systemConfigProxy), - Claim.unwrap(config.cannonPrestate), - Claim.unwrap(config.cannonKonaPrestate) + address(upgradeInput.systemConfig), upgradeInput.disputeGameConfigs, upgradeInput.extraInstructions ); upgradeOPChain.run(uoci); } diff --git a/packages/contracts-bedrock/test/opcm/UpgradeSuperchainConfig.t.sol b/packages/contracts-bedrock/test/opcm/UpgradeSuperchainConfig.t.sol index da09590493a10..f9308dd30e2a5 100644 --- a/packages/contracts-bedrock/test/opcm/UpgradeSuperchainConfig.t.sol +++ b/packages/contracts-bedrock/test/opcm/UpgradeSuperchainConfig.t.sol @@ -1,27 +1,51 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +// Scripts +import { UpgradeSuperchainConfig } from "scripts/deploy/UpgradeSuperchainConfig.s.sol"; + +// Interfaces import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IOPContractsManagerV2 } from "interfaces/L1/opcm/IOPContractsManagerV2.sol"; +import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; -import { UpgradeSuperchainConfig } from "scripts/deploy/UpgradeSuperchainConfig.s.sol"; -import { 
IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; -/// @title MockOPCM +/// @title MockOPCMV1 /// @notice This contract is used to mock the OPCM contract and emit an event which we check for in the test. -contract MockOPCM { +contract MockOPCMV1 { event UpgradeCalled(address indexed superchainConfig); + function isDevFeatureEnabled(bytes32 /* _feature */ ) public pure returns (bool) { + return false; + } + function upgradeSuperchainConfig(ISuperchainConfig _superchainConfig) public { emit UpgradeCalled(address(_superchainConfig)); } } +/// @title MockOPCMV2 +/// @notice This contract is used to mock the OPCM v2 contract and emit an event which we check for in the test. +contract MockOPCMV2 { + event UpgradeCalled(IOPContractsManagerV2.SuperchainUpgradeInput indexed superchainUpgradeInput); + + function isDevFeatureEnabled(bytes32 _feature) public pure returns (bool) { + return _feature == DevFeatures.OPCM_V2; + } + + function upgradeSuperchain(IOPContractsManagerV2.SuperchainUpgradeInput memory _superchainUpgradeInput) public { + emit UpgradeCalled(_superchainUpgradeInput); + } +} + /// @title UpgradeSuperchainConfig_Test /// @notice This test is used to test the UpgradeSuperchainConfig script. -contract UpgradeSuperchainConfig_Run_Test is Test { - MockOPCM mockOPCM; +contract UpgradeSuperchainConfigV1_Run_Test is Test { + MockOPCMV1 mockOPCM; UpgradeSuperchainConfig.Input input; UpgradeSuperchainConfig upgradeSuperchainConfig; address prank; @@ -31,9 +55,9 @@ contract UpgradeSuperchainConfig_Run_Test is Test { /// @notice Sets up the test suite. 
function setUp() public virtual { - mockOPCM = new MockOPCM(); + mockOPCM = new MockOPCMV1(); - input.opcm = IOPContractsManager(address(mockOPCM)); + input.opcm = address(mockOPCM); superchainConfig = ISuperchainConfig(makeAddr("superchainConfig")); prank = makeAddr("prank"); @@ -59,10 +83,10 @@ contract UpgradeSuperchainConfig_Run_Test is Test { upgradeSuperchainConfig.run(input); input.prank = prank; - input.opcm = IOPContractsManager(address(0)); + input.opcm = address(0); vm.expectRevert("UpgradeSuperchainConfig: opcm not set"); upgradeSuperchainConfig.run(input); - input.opcm = IOPContractsManager(address(mockOPCM)); + input.opcm = address(mockOPCM); input.superchainConfig = ISuperchainConfig(address(0)); vm.expectRevert("UpgradeSuperchainConfig: superchainConfig not set"); @@ -70,3 +94,52 @@ contract UpgradeSuperchainConfig_Run_Test is Test { input.superchainConfig = ISuperchainConfig(address(superchainConfig)); } } + +/// @title UpgradeSuperchainConfigV2_Run_Test +/// @notice This test is used to test the UpgradeSuperchainConfig script with OPCM v2. +contract UpgradeSuperchainConfigV2_Run_Test is Test { + MockOPCMV2 mockOPCM; + UpgradeSuperchainConfig upgradeSuperchainConfig; + address prank; + ISuperchainConfig superchainConfig; + + event UpgradeCalled(IOPContractsManagerV2.SuperchainUpgradeInput indexed superchainUpgradeInput); + + /// @notice Sets up the test suite. + function setUp() public { + mockOPCM = new MockOPCMV2(); + + superchainConfig = ISuperchainConfig(makeAddr("superchainConfig")); + prank = makeAddr("prank"); + + upgradeSuperchainConfig = new UpgradeSuperchainConfig(); + } + + /// @notice Tests that the UpgradeSuperchainConfig script succeeds when called with non-zero input values. 
+ function testFuzz_upgrade_succeeds(IOPContractsManagerUtils.ExtraInstruction[] memory extraInstructions) public { + UpgradeSuperchainConfig.Input memory input = _getInput(extraInstructions); + + // UpgradeCalled should be emitted by the prank since it's a delegate call. + vm.expectEmit(address(prank)); + emit UpgradeCalled( + IOPContractsManagerV2.SuperchainUpgradeInput({ + superchainConfig: superchainConfig, + extraInstructions: extraInstructions + }) + ); + upgradeSuperchainConfig.run(input); + } + + function _getInput(IOPContractsManagerUtils.ExtraInstruction[] memory extraInstructions) + internal + view + returns (UpgradeSuperchainConfig.Input memory) + { + return UpgradeSuperchainConfig.Input({ + prank: prank, + opcm: address(mockOPCM), + superchainConfig: superchainConfig, + extraInstructions: extraInstructions + }); + } +} diff --git a/packages/contracts-bedrock/test/periphery/AssetReceiver.t.sol b/packages/contracts-bedrock/test/periphery/AssetReceiver.t.sol index 3e493984df1f2..45e7aa87dc58e 100644 --- a/packages/contracts-bedrock/test/periphery/AssetReceiver.t.sol +++ b/packages/contracts-bedrock/test/periphery/AssetReceiver.t.sol @@ -1,10 +1,12 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import { TestERC20 } from "test/mocks/TestERC20.sol"; import { TestERC721 } from "test/mocks/TestERC721.sol"; + +// Contracts import { AssetReceiver } from "src/periphery/AssetReceiver.sol"; /// @title AssetReceiver_TestInit diff --git a/packages/contracts-bedrock/test/periphery/Transactor.t.sol b/packages/contracts-bedrock/test/periphery/Transactor.t.sol index e62a484e6488b..a05bc2f6a530a 100644 --- a/packages/contracts-bedrock/test/periphery/Transactor.t.sol +++ b/packages/contracts-bedrock/test/periphery/Transactor.t.sol @@ -1,8 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities -import { Test } 
from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Contracts import { Transactor } from "src/periphery/Transactor.sol"; /// @title Transactor_TestInit diff --git a/packages/contracts-bedrock/test/periphery/TransferOnion.t.sol b/packages/contracts-bedrock/test/periphery/TransferOnion.t.sol index 2d4261cc93d3e..fcc1b9406b31a 100644 --- a/packages/contracts-bedrock/test/periphery/TransferOnion.t.sol +++ b/packages/contracts-bedrock/test/periphery/TransferOnion.t.sol @@ -1,11 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities -import { Test } from "forge-std/Test.sol"; -import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; -// Target contract +// Contracts +import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; import { TransferOnion } from "src/periphery/TransferOnion.sol"; /// @title TransferOnion_TestInit diff --git a/packages/contracts-bedrock/test/periphery/drippie/Drippie.t.sol b/packages/contracts-bedrock/test/periphery/drippie/Drippie.t.sol index e230f70590249..e1869ef176d1e 100644 --- a/packages/contracts-bedrock/test/periphery/drippie/Drippie.t.sol +++ b/packages/contracts-bedrock/test/periphery/drippie/Drippie.t.sol @@ -1,11 +1,16 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +import { SimpleStorage } from "test/mocks/SimpleStorage.sol"; + +// Contracts import { Drippie } from "src/periphery/drippie/Drippie.sol"; -import { IDripCheck } from "src/periphery/drippie/IDripCheck.sol"; import { CheckTrue } from "src/periphery/drippie/dripchecks/CheckTrue.sol"; -import { SimpleStorage } from "test/mocks/SimpleStorage.sol"; + +// Interfaces +import { IDripCheck } from "src/periphery/drippie/IDripCheck.sol"; /// @title TestDrippie /// @notice This is a wrapper contract around Drippie used for 
testing. Returning an entire diff --git a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol index bfc051c9eace6..980c46dc044f4 100644 --- a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol +++ b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol @@ -1,7 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Contracts import { CheckBalanceLow } from "src/periphery/drippie/dripchecks/CheckBalanceLow.sol"; /// @title CheckBalanceLow_TestInit diff --git a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckSecrets.t.sol b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckSecrets.t.sol index 9da99bfcd9750..891c6d874d59c 100644 --- a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckSecrets.t.sol +++ b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckSecrets.t.sol @@ -1,7 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Contracts import { CheckSecrets } from "src/periphery/drippie/dripchecks/CheckSecrets.sol"; /// @title CheckSecrets_TestInit diff --git a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckTrue.t.sol b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckTrue.t.sol index 4fc04862b872c..ef6dccdfb5a8e 100644 --- a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckTrue.t.sol +++ b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckTrue.t.sol @@ -1,7 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// 
Contracts import { CheckTrue } from "src/periphery/drippie/dripchecks/CheckTrue.sol"; /// @title CheckTrue_TestInit diff --git a/packages/contracts-bedrock/test/periphery/faucet/Faucet.t.sol b/packages/contracts-bedrock/test/periphery/faucet/Faucet.t.sol index 98c97df6556de..7d663af9c1c93 100644 --- a/packages/contracts-bedrock/test/periphery/faucet/Faucet.t.sol +++ b/packages/contracts-bedrock/test/periphery/faucet/Faucet.t.sol @@ -1,10 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +import { FaucetHelper } from "test/mocks/FaucetHelper.sol"; + +// Contracts import { Faucet } from "src/periphery/faucet/Faucet.sol"; import { AdminFaucetAuthModule } from "src/periphery/faucet/authmodules/AdminFaucetAuthModule.sol"; -import { FaucetHelper } from "test/mocks/FaucetHelper.sol"; /// @title Faucet_TestInit /// @notice Reusable test initialization for `Faucet` tests. diff --git a/packages/contracts-bedrock/test/periphery/faucet/authmodules/AdminFaucetAuthModule.t.sol b/packages/contracts-bedrock/test/periphery/faucet/authmodules/AdminFaucetAuthModule.t.sol index 2bc5e7b4d567e..57ca1937a7262 100644 --- a/packages/contracts-bedrock/test/periphery/faucet/authmodules/AdminFaucetAuthModule.t.sol +++ b/packages/contracts-bedrock/test/periphery/faucet/authmodules/AdminFaucetAuthModule.t.sol @@ -1,10 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; +import { FaucetHelper } from "test/mocks/FaucetHelper.sol"; + +// Contracts import { AdminFaucetAuthModule } from "src/periphery/faucet/authmodules/AdminFaucetAuthModule.sol"; import { Faucet } from "src/periphery/faucet/Faucet.sol"; -import { FaucetHelper } from "test/mocks/FaucetHelper.sol"; /// @title AdminFaucetAuthModule_TestInit /// @notice Reusable test initialization for `AdminFaucetAuthModule` 
tests. diff --git a/packages/contracts-bedrock/test/safe-tools/SafeTestTools.sol b/packages/contracts-bedrock/test/safe-tools/SafeTestTools.sol index 15457e8aefcb2..35b0ef9ee8edf 100644 --- a/packages/contracts-bedrock/test/safe-tools/SafeTestTools.sol +++ b/packages/contracts-bedrock/test/safe-tools/SafeTestTools.sol @@ -1,16 +1,24 @@ // SPDX-License-Identifier: MIT pragma solidity >=0.7.0 <0.9.0; -import "forge-std/Test.sol"; -import { LibSort } from "@solady/utils/LibSort.sol"; +// Forge +import { console2 as console } from "forge-std/console2.sol"; +import { Vm } from "forge-std/Vm.sol"; + +// Testing +import "./CompatibilityFallbackHandler_1_3_0.sol"; + +// Contracts import { Safe as GnosisSafe } from "safe-contracts/Safe.sol"; +import { SafeProxyFactory as GnosisSafeProxyFactory } from "safe-contracts/proxies/SafeProxyFactory.sol"; +import { SignMessageLib } from "safe-contracts/libraries/SignMessageLib.sol"; + +// Libraries +import { LibSort } from "@solady/utils/LibSort.sol"; import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; import { ModuleManager } from "safe-contracts/base/ModuleManager.sol"; import { GuardManager } from "safe-contracts/base/GuardManager.sol"; -import { SafeProxyFactory as GnosisSafeProxyFactory } from "safe-contracts/proxies/SafeProxyFactory.sol"; import { Enum } from "safe-contracts/common/Enum.sol"; -import { SignMessageLib } from "safe-contracts/libraries/SignMessageLib.sol"; -import "./CompatibilityFallbackHandler_1_3_0.sol"; // Tools to simplify testing Safe contracts // Author: Colin Nielsen (https://github.com/colinnielsen/safe-tools) diff --git a/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol b/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol index 11891092201ef..17733a9bda303 100644 --- a/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol +++ b/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol @@ -1,17 +1,19 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { 
Test } from "forge-std/Test.sol"; -import { StdUtils } from "forge-std/StdUtils.sol"; -import { StdCheats } from "forge-std/StdCheats.sol"; -import { Safe } from "safe-contracts/Safe.sol"; -import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; -import { Enum } from "safe-contracts/common/Enum.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import "test/safe-tools/SafeTestTools.sol"; -import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; +// Contracts +import { Safe } from "safe-contracts/Safe.sol"; +import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; import { LivenessGuard } from "src/safe/LivenessGuard.sol"; +// Libraries +import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; +import { Enum } from "safe-contracts/common/Enum.sol"; + /// @notice A wrapper contract exposing the length of the ownersBefore set in the LivenessGuard. contract LivenessGuard_WrappedGuard_Harness is LivenessGuard { using EnumerableSet for EnumerableSet.AddressSet; @@ -165,7 +167,7 @@ contract LivenessGuard_ShowLiveness_Test is LivenessGuard_TestInit { /// @title LivenessGuard_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `LivenessGuard` /// contract or are testing multiple functions at once. 
-contract LivenessGuard_Uncategorized_Test is StdCheats, StdUtils, LivenessGuard_TestInit { +contract LivenessGuard_Uncategorized_Test is LivenessGuard_TestInit { using SafeTestLib for SafeInstance; /// @notice Enumerates the possible owner management operations diff --git a/packages/contracts-bedrock/test/safe/LivenessModule.t.sol b/packages/contracts-bedrock/test/safe/LivenessModule.t.sol index ee69895946bf2..12b15d5ebfab9 100644 --- a/packages/contracts-bedrock/test/safe/LivenessModule.t.sol +++ b/packages/contracts-bedrock/test/safe/LivenessModule.t.sol @@ -1,14 +1,18 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; -import { Safe } from "safe-contracts/Safe.sol"; -import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import "test/safe-tools/SafeTestTools.sol"; +// Contracts +import { Safe } from "safe-contracts/Safe.sol"; import { LivenessModule } from "src/safe/LivenessModule.sol"; import { LivenessGuard } from "src/safe/LivenessGuard.sol"; +// Libraries +import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; + /// @title LivenessModule_TestInit /// @notice Reusable test initialization for `LivenessModule` tests. 
abstract contract LivenessModule_TestInit is Test, SafeTestTools { diff --git a/packages/contracts-bedrock/test/safe/LivenessModule2.t.sol b/packages/contracts-bedrock/test/safe/LivenessModule2.t.sol index e64b0b27e2d39..1059c0703ec72 100644 --- a/packages/contracts-bedrock/test/safe/LivenessModule2.t.sol +++ b/packages/contracts-bedrock/test/safe/LivenessModule2.t.sol @@ -1,17 +1,21 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; -import { Enum } from "safe-contracts/common/Enum.sol"; -import { Safe } from "safe-contracts/Safe.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import "test/safe-tools/SafeTestTools.sol"; -import { Constants } from "src/libraries/Constants.sol"; +import { DummyGuard } from "test/mocks/DummyGuard.sol"; +// Contracts +import { Safe } from "safe-contracts/Safe.sol"; import { LivenessModule2 } from "src/safe/LivenessModule2.sol"; import { SaferSafes } from "src/safe/SaferSafes.sol"; + +// Libraries +import { Enum } from "safe-contracts/common/Enum.sol"; +import { Constants } from "src/libraries/Constants.sol"; import { ModuleManager } from "safe-contracts/base/ModuleManager.sol"; import { GuardManager } from "safe-contracts/base/GuardManager.sol"; -import { DummyGuard } from "test/mocks/DummyGuard.sol"; /// @title LivenessModule2_TestUtils /// @notice Reusable helper methods for LivenessModule2 tests. 
diff --git a/packages/contracts-bedrock/test/safe/SafeSigners.t.sol b/packages/contracts-bedrock/test/safe/SafeSigners.t.sol index f59ad036064fd..b2aec90c5a12c 100644 --- a/packages/contracts-bedrock/test/safe/SafeSigners.t.sol +++ b/packages/contracts-bedrock/test/safe/SafeSigners.t.sol @@ -1,10 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; -import { SafeSigners } from "src/safe/SafeSigners.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import "test/safe-tools/SafeTestTools.sol"; +// Contracts +import { SafeSigners } from "src/safe/SafeSigners.sol"; + /// @title SafeSigners_TestInit /// @notice Reusable test initialization for `SafeSigners` tests. abstract contract SafeSigners_TestInit is Test { diff --git a/packages/contracts-bedrock/test/safe/TimelockGuard.t.sol b/packages/contracts-bedrock/test/safe/TimelockGuard.t.sol index 2333c23208dc9..0bb2e4b84c0fa 100644 --- a/packages/contracts-bedrock/test/safe/TimelockGuard.t.sol +++ b/packages/contracts-bedrock/test/safe/TimelockGuard.t.sol @@ -1,15 +1,22 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; -import { Safe } from "safe-contracts/Safe.sol"; -import { GuardManager } from "safe-contracts/base/GuardManager.sol"; -import { ITransactionGuard } from "interfaces/safe/ITransactionGuard.sol"; +// Testing import "test/safe-tools/SafeTestTools.sol"; +import { Test } from "test/setup/Test.sol"; +import { stdStorage, StdStorage } from "forge-std/StdStorage.sol"; +// Contracts +import { Safe } from "safe-contracts/Safe.sol"; import { TimelockGuard } from "src/safe/TimelockGuard.sol"; import { SaferSafes } from "src/safe/SaferSafes.sol"; +// Libraries +import { GuardManager } from "safe-contracts/base/GuardManager.sol"; + +// Interfaces +import { ITransactionGuard } from "interfaces/safe/ITransactionGuard.sol"; + using TransactionBuilder for TransactionBuilder.Transaction; /// @title 
TransactionBuilder diff --git a/packages/contracts-bedrock/test/scripts/DeployOwnership.t.sol b/packages/contracts-bedrock/test/scripts/DeployOwnership.t.sol index 7f9df4cd2600a..eec100fa85db1 100644 --- a/packages/contracts-bedrock/test/scripts/DeployOwnership.t.sol +++ b/packages/contracts-bedrock/test/scripts/DeployOwnership.t.sol @@ -1,19 +1,27 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Forge +import { StdCheatsSafe } from "forge-std/StdCheats.sol"; + +// Testing +import { Test } from "test/setup/Test.sol"; + +// Scripts import { DeployOwnership, SafeConfig, SecurityCouncilConfig, LivenessModuleConfig } from "scripts/deploy/DeployOwnership.s.sol"; -import { Test } from "forge-std/Test.sol"; +// Contracts import { Safe } from "safe-contracts/Safe.sol"; -import { ModuleManager } from "safe-contracts/base/ModuleManager.sol"; - import { LivenessModule2 } from "src/safe/LivenessModule2.sol"; +// Libraries +import { ModuleManager } from "safe-contracts/base/ModuleManager.sol"; + contract DeployOwnershipTest is Test, DeployOwnership { address internal constant SENTINEL_MODULES = address(0x1); @@ -22,6 +30,10 @@ contract DeployOwnershipTest is Test, DeployOwnership { run(); } + function makeAddr(string memory _name) internal override(Test, StdCheatsSafe) returns (address) { + return Test.makeAddr(_name); + } + /// @dev Helper function to make assertions on basic Safe config properties. 
function _checkSafeConfig(SafeConfig memory _safeConfig, Safe _safe) internal view { assertEq(_safe.getThreshold(), _safeConfig.threshold); diff --git a/packages/contracts-bedrock/test/scripts/FetchChainInfo.t.sol b/packages/contracts-bedrock/test/scripts/FetchChainInfo.t.sol index 95a93038751f7..906c47d37ff7f 100644 --- a/packages/contracts-bedrock/test/scripts/FetchChainInfo.t.sol +++ b/packages/contracts-bedrock/test/scripts/FetchChainInfo.t.sol @@ -1,8 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Scripts import { FetchChainInfo, FetchChainInfoInput, FetchChainInfoOutput } from "scripts/FetchChainInfo.s.sol"; + +// Libraries import { GameTypes, GameType } from "src/dispute/lib/Types.sol"; import { LibGameType } from "src/dispute/lib/LibUDT.sol"; diff --git a/packages/contracts-bedrock/test/scripts/L2Genesis.t.sol b/packages/contracts-bedrock/test/scripts/L2Genesis.t.sol index f4943f82ea8b4..e157c1cff9970 100644 --- a/packages/contracts-bedrock/test/scripts/L2Genesis.t.sol +++ b/packages/contracts-bedrock/test/scripts/L2Genesis.t.sol @@ -1,11 +1,18 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; + +// Scripts import { L2Genesis } from "scripts/L2Genesis.s.sol"; -import { Predeploys } from "src/libraries/Predeploys.sol"; import { LATEST_FORK } from "scripts/libraries/Config.sol"; + +// Libraries +import { Predeploys } from "src/libraries/Predeploys.sol"; + +// Interfaces import { ISuperchainRevSharesCalculator } from "interfaces/L2/ISuperchainRevSharesCalculator.sol"; import { ISequencerFeeVault } from "interfaces/L2/ISequencerFeeVault.sol"; import { IBaseFeeVault } from "interfaces/L2/IBaseFeeVault.sol"; diff --git 
a/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol b/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol index 48407923434c9..904d3538eb912 100644 --- a/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol +++ b/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol @@ -13,6 +13,7 @@ import { VerifyOPCM } from "scripts/deploy/VerifyOPCM.s.sol"; // Interfaces import { IOPContractsManager, IOPContractsManagerUpgrader } from "interfaces/L1/IOPContractsManager.sol"; +import { IOPContractsManagerV2 } from "interfaces/L1/opcm/IOPContractsManagerV2.sol"; contract VerifyOPCM_Harness is VerifyOPCM { function loadArtifactInfo(string memory _artifactPath) public view returns (ArtifactInfo memory) { @@ -42,6 +43,10 @@ contract VerifyOPCM_Harness is VerifyOPCM { return _verifyContractsContainerConsistency(_propRefs); } + function verifyOpcmUtilsConsistency(OpcmContractRef[] memory _propRefs) public view { + return _verifyOpcmUtilsConsistency(_propRefs); + } + function verifyOpcmImmutableVariables(IOPContractsManager _opcm) public returns (bool) { return _verifyOpcmImmutableVariables(_opcm); } @@ -395,6 +400,80 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { assertGt(componentsWithContainerTested, 0, "Should have tested at least one component"); } + /// @notice Tests that the script verifies all component contracts with opcmUtils() have the same address. + function test_verifyOpcmUtilsConsistency_succeeds() public { + // Coverage changes bytecode and causes failures, skip. + skipIfCoverage(); + + // Only run for OPCM V2 + skipIfDevFeatureDisabled(DevFeatures.OPCM_V2); + + // Get the property references (which include the component addresses) + VerifyOPCM.OpcmContractRef[] memory propRefs = harness.getOpcmPropertyRefs(opcm); + + // This should succeed with the current setup where all contracts have the same opcmUtils address. 
+ harness.verifyOpcmUtilsConsistency(propRefs); + } + + /// @notice Tests that the script reverts when contracts have different opcmUtils addresses. + function test_verifyOpcmUtilsConsistency_mismatch_reverts() public { + // Coverage changes bytecode and causes failures, skip. + skipIfCoverage(); + + // Only run for OPCM V2 + skipIfDevFeatureDisabled(DevFeatures.OPCM_V2); + + // Get the property references (which include the component addresses) + VerifyOPCM.OpcmContractRef[] memory propRefs = harness.getOpcmPropertyRefs(opcm); + + // Create a different address to simulate a mismatch. + address differentUtils = address(0x9999999999999999999999999999999999999999); + + // Mock the first component with opcmUtils() to return a different address + _mockFirstOpcmUtilsComponent(propRefs, differentUtils); + + // Now the consistency check should fail. + vm.expectRevert(VerifyOPCM.VerifyOPCM_OpcmUtilsMismatch.selector); + harness.verifyOpcmUtilsConsistency(propRefs); + } + + /// @notice Tests that each OPCM component with opcmUtils() can be individually tested for mismatch. + function test_verifyOpcmUtilsConsistency_eachComponent_reverts() public { + // Coverage changes bytecode and causes failures, skip. 
+ skipIfCoverage(); + + // Only run for OPCM V2 + skipIfDevFeatureDisabled(DevFeatures.OPCM_V2); + + // Get the property references (which include the component addresses) + VerifyOPCM.OpcmContractRef[] memory propRefs = harness.getOpcmPropertyRefs(opcm); + + // Test each OPCM component individually (only those that actually have opcmUtils()) + address differentUtils = address(0x9999999999999999999999999999999999999999); + + uint256 componentsWithUtilsTested = 0; + for (uint256 i = 0; i < propRefs.length; i++) { + string memory field = propRefs[i].field; + if (_hasOpcmUtils(field)) { + // Mock this specific component to return a different address + vm.mockCall( + propRefs[i].addr, abi.encodeCall(IOPContractsManagerV2.opcmUtils, ()), abi.encode(differentUtils) + ); + + // The consistency check should fail + vm.expectRevert(VerifyOPCM.VerifyOPCM_OpcmUtilsMismatch.selector); + harness.verifyOpcmUtilsConsistency(propRefs); + + // Clear the mock for next iteration + vm.clearMockedCalls(); + componentsWithUtilsTested++; + } + } + + // Ensure we actually tested some components (currently: opcmV2, opcmMigrator) + assertGt(componentsWithUtilsTested, 0, "Should have tested at least one component with opcmUtils"); + } + function _isDisputeGameV2ContractRef(VerifyOPCM.OpcmContractRef memory ref) internal pure returns (bool) { return LibString.eq(ref.name, "FaultDisputeGameV2") || LibString.eq(ref.name, "PermissionedDisputeGameV2"); } @@ -452,6 +531,35 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { return true; } + /// @notice Helper function to check if a field represents an OPCM component that has opcmUtils(). + /// @param _field The field name to check. + /// @return True if the field represents an OPCM component with opcmUtils(), false otherwise. 
+ function _hasOpcmUtils(string memory _field) internal pure returns (bool) { + // Only opcmV2 and opcmMigrator have opcmUtils() via OPContractsManagerUtilsCaller + return LibString.eq(_field, "opcmV2") || LibString.eq(_field, "opcmMigrator"); + } + + /// @notice Utility function to mock the first OPCM component's opcmUtils address. + /// @param _propRefs Array of property references to search through. + /// @param _mockAddress The address to mock the opcmUtils call to return. + function _mockFirstOpcmUtilsComponent( + VerifyOPCM.OpcmContractRef[] memory _propRefs, + address _mockAddress + ) + internal + { + for (uint256 i = 0; i < _propRefs.length; i++) { + string memory field = _propRefs[i].field; + // Check if this is an OPCM component that has opcmUtils() + if (_hasOpcmUtils(field)) { + vm.mockCall( + _propRefs[i].addr, abi.encodeCall(IOPContractsManagerV2.opcmUtils, ()), abi.encode(_mockAddress) + ); + return; + } + } + } + /// @notice Tests that immutable variables are correctly verified in the OPCM contract. function test_verifyOpcmImmutableVariables_succeeds() public { // Coverage changes bytecode and causes failures, skip. diff --git a/packages/contracts-bedrock/test/setup/CommonTest.sol b/packages/contracts-bedrock/test/setup/CommonTest.sol index 529534b7ceb6f..daea37745ae3e 100644 --- a/packages/contracts-bedrock/test/setup/CommonTest.sol +++ b/packages/contracts-bedrock/test/setup/CommonTest.sol @@ -1,10 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -// Forge -import { Test } from "forge-std/Test.sol"; - // Testing +import { Test } from "test/setup/Test.sol"; import { Setup } from "test/setup/Setup.sol"; import { Events } from "test/setup/Events.sol"; import { FFIInterface } from "test/setup/FFIInterface.sol"; @@ -31,6 +29,9 @@ abstract contract CommonTest is Test, Setup, Events { bytes32 constant nonZeroHash = keccak256(abi.encode("NON_ZERO")); + /// @notice The default initial bond value for dispute games. 
+ uint256 constant DEFAULT_DISPUTE_GAME_INIT_BOND = 0.08 ether; + FFIInterface constant ffi = FFIInterface(address(uint160(uint256(keccak256(abi.encode("optimism.ffi")))))); bool useAltDAOverride; diff --git a/packages/contracts-bedrock/test/setup/FeatureFlags.sol b/packages/contracts-bedrock/test/setup/FeatureFlags.sol index 015e8613801c9..300bfea09d982 100644 --- a/packages/contracts-bedrock/test/setup/FeatureFlags.sol +++ b/packages/contracts-bedrock/test/setup/FeatureFlags.sol @@ -7,6 +7,7 @@ import { Vm } from "forge-std/Vm.sol"; // Libraries import { DevFeatures } from "src/libraries/DevFeatures.sol"; +import { Features } from "src/libraries/Features.sol"; import { Config } from "scripts/libraries/Config.sol"; // Interfaces @@ -24,6 +25,9 @@ abstract contract FeatureFlags { /// @notice The address of the SystemConfig contract. ISystemConfig internal sysCfg; + /// @notice Thrown when an unknown feature is provided. + error FeatureFlags_UnknownFeature(bytes32); + /// @notice Sets the address of the SystemConfig contract. /// @param _sysCfg The address of the SystemConfig contract. function setSystemConfig(ISystemConfig _sysCfg) public { @@ -31,6 +35,7 @@ abstract contract FeatureFlags { } /// @notice Resolves the development feature bitmap. + /// @dev When updating this function, make sure to also update the getFeatureName function. function resolveFeaturesFromEnv() public { if (Config.devFeatureInterop()) { console.log("Setup: DEV_FEATURE__OPTIMISM_PORTAL_INTEROP is enabled"); @@ -42,6 +47,25 @@ abstract contract FeatureFlags { } } + /// @notice Returns the string name of a feature. + /// @param _feature The feature to get the name of. + /// @return The name of the feature. 
+ function getFeatureName(bytes32 _feature) public pure returns (string memory) { + if (_feature == DevFeatures.OPTIMISM_PORTAL_INTEROP) { + return "DEV_FEATURE__OPTIMISM_PORTAL_INTEROP"; + } else if (_feature == DevFeatures.OPCM_V2) { + return "DEV_FEATURE__OPCM_V2"; + } else if (_feature == Features.CUSTOM_GAS_TOKEN) { + return "SYS_FEATURE__CUSTOM_GAS_TOKEN"; + } else if (_feature == Features.ETH_LOCKBOX) { + return "SYS_FEATURE__ETH_LOCKBOX"; + } else { + // NOTE: We error out here so that developers remember to actually name their features + // above. Solidity doesn't have anything like reflection that could do this. + revert FeatureFlags_UnknownFeature(_feature); + } + } + /// @notice Enables a feature. /// @param _feature The feature to set. function setDevFeatureEnabled(bytes32 _feature) public { @@ -72,7 +96,7 @@ abstract contract FeatureFlags { /// @param _feature The feature to check. function skipIfSysFeatureEnabled(bytes32 _feature) public { if (isSysFeatureEnabled(_feature)) { - vm.skip(true); + vm.skip(true, string.concat("Skipping test because ", getFeatureName(_feature), " is enabled")); } } @@ -80,7 +104,7 @@ abstract contract FeatureFlags { /// @param _feature The feature to check. function skipIfSysFeatureDisabled(bytes32 _feature) public { if (!isSysFeatureEnabled(_feature)) { - vm.skip(true); + vm.skip(true, string.concat("Skipping test because ", getFeatureName(_feature), " is disabled")); } } @@ -88,7 +112,7 @@ abstract contract FeatureFlags { /// @param _feature The feature to check. function skipIfDevFeatureEnabled(bytes32 _feature) public { if (isDevFeatureEnabled(_feature)) { - vm.skip(true); + vm.skip(true, string.concat("Skipping test because ", getFeatureName(_feature), " is enabled")); } } @@ -96,7 +120,7 @@ abstract contract FeatureFlags { /// @param _feature The feature to check. 
function skipIfDevFeatureDisabled(bytes32 _feature) public { if (!isDevFeatureEnabled(_feature)) { - vm.skip(true); + vm.skip(true, string.concat("Skipping test because ", getFeatureName(_feature), " is disabled")); } } } diff --git a/packages/contracts-bedrock/test/setup/ForkLive.s.sol b/packages/contracts-bedrock/test/setup/ForkLive.s.sol index d8c6f4a5270c9..ba151832ff0d7 100644 --- a/packages/contracts-bedrock/test/setup/ForkLive.s.sol +++ b/packages/contracts-bedrock/test/setup/ForkLive.s.sol @@ -272,36 +272,36 @@ contract ForkLive is Deployer, StdAssertions, DisputeGames { address proposer = permissionedGameProposer(disputeGameFactory); // Prepare the upgrade input. - IOPContractsManagerV2.DisputeGameConfig[] memory disputeGameConfigs = - new IOPContractsManagerV2.DisputeGameConfig[](3); - disputeGameConfigs[0] = IOPContractsManagerV2.DisputeGameConfig({ + IOPContractsManagerUtils.DisputeGameConfig[] memory disputeGameConfigs = + new IOPContractsManagerUtils.DisputeGameConfig[](3); + disputeGameConfigs[0] = IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, initBond: disputeGameFactory.initBonds(GameTypes.CANNON), gameType: GameTypes.CANNON, gameArgs: abi.encode( - IOPContractsManagerV2.FaultDisputeGameConfig({ + IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: Claim.wrap(bytes32(keccak256("cannonPrestate"))) }) ) }); - disputeGameConfigs[1] = IOPContractsManagerV2.DisputeGameConfig({ + disputeGameConfigs[1] = IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, initBond: disputeGameFactory.initBonds(GameTypes.PERMISSIONED_CANNON), gameType: GameTypes.PERMISSIONED_CANNON, gameArgs: abi.encode( - IOPContractsManagerV2.PermissionedDisputeGameConfig({ + IOPContractsManagerUtils.PermissionedDisputeGameConfig({ absolutePrestate: Claim.wrap(bytes32(keccak256("cannonPrestate"))), proposer: proposer, challenger: challenger }) ) }); - disputeGameConfigs[2] = IOPContractsManagerV2.DisputeGameConfig({ + disputeGameConfigs[2] = 
IOPContractsManagerUtils.DisputeGameConfig({ enabled: true, initBond: disputeGameFactory.initBonds(GameTypes.CANNON_KONA), gameType: GameTypes.CANNON_KONA, gameArgs: abi.encode( - IOPContractsManagerV2.FaultDisputeGameConfig({ + IOPContractsManagerUtils.FaultDisputeGameConfig({ absolutePrestate: Claim.wrap(bytes32(keccak256("cannonKonaPrestate"))) }) ) diff --git a/packages/contracts-bedrock/test/setup/Test.sol b/packages/contracts-bedrock/test/setup/Test.sol new file mode 100644 index 0000000000000..5ce331b910973 --- /dev/null +++ b/packages/contracts-bedrock/test/setup/Test.sol @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +// Forge +import { Test as ForgeTest } from "forge-std/Test.sol"; + +/// @title Test +/// @notice Test is a minimal extension of the Test contract with op-specific tweaks. +abstract contract Test is ForgeTest { + /// @notice Makes an address without a private key, labels it, and cleans it. + /// @param _name The name of the address. + /// @return The address. 
+ function makeAddr(string memory _name) internal virtual override returns (address) { + address addr = address(uint160(uint256(keccak256(abi.encode(_name))))); + destroyAccount(addr, address(0)); + vm.label(addr, _name); + return addr; + } +} diff --git a/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol index dd9cf4eb1e305..3d11320d33ab2 100644 --- a/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/universal/CrossDomainMessenger.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing utilities -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol b/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol index 025b43ee89f29..6365f876969d9 100644 --- a/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol +++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol @@ -5,6 +5,7 @@ import { CommonTest } from "test/setup/CommonTest.sol"; import { IOptimismMintableERC20 } from "interfaces/universal/IOptimismMintableERC20.sol"; import { ILegacyMintableERC20 } from "interfaces/legacy/ILegacyMintableERC20.sol"; import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; +import { SemverComp } from "src/libraries/SemverComp.sol"; /// @title OptimismMintableERC20_TestInit /// @notice Reusable test initialization for `OptimismMintableERC20` tests. @@ -14,44 +15,61 @@ abstract contract OptimismMintableERC20_TestInit is CommonTest { } /// @title OptimismMintableERC20_Permit2_Test -/// @notice Tests the `permit2` function of the `OptimismMintableERC20` contract. 
+/// @notice Tests the `PERMIT2` function of the `OptimismMintableERC20` contract. contract OptimismMintableERC20_Permit2_Test is OptimismMintableERC20_TestInit { - function test_permit2_transferFrom_succeeds() external { + /// @notice Tests that PERMIT2 can transfer tokens on behalf of any owner. + function testFuzz_permit2_transferFrom_succeeds(address _owner, address _to, uint256 _amount) external { + vm.assume(_owner != address(0)); + vm.assume(_to != address(0)); + vm.assume(_owner != _to); + vm.prank(address(l2StandardBridge)); - L2Token.mint(alice, 100); + L2Token.mint(_owner, _amount); - assertEq(L2Token.balanceOf(bob), 0); + assertEq(L2Token.balanceOf(_to), 0); vm.prank(L2Token.PERMIT2()); - L2Token.transferFrom(alice, bob, 100); - assertEq(L2Token.balanceOf(bob), 100); + L2Token.transferFrom(_owner, _to, _amount); + assertEq(L2Token.balanceOf(_to), _amount); } } /// @title OptimismMintableERC20_Allowance_Test /// @notice Tests the `allowance` function of the `OptimismMintableERC20` contract. contract OptimismMintableERC20_Allowance_Test is OptimismMintableERC20_TestInit { - function test_allowance_permit2Max_works() external view { - assertEq(L2Token.allowance(alice, L2Token.PERMIT2()), type(uint256).max); + /// @notice Tests that allowance returns max uint256 for PERMIT2 spender. + function testFuzz_allowance_permit2Spender_succeeds(address _owner) external view { + assertEq(L2Token.allowance(_owner, L2Token.PERMIT2()), type(uint256).max); + } + + /// @notice Tests that allowance returns the actual allowance for non-PERMIT2 spenders. + function testFuzz_allowance_nonPermit2Spender_succeeds(address _owner, address _spender) external view { + vm.assume(_spender != L2Token.PERMIT2()); + assertEq(L2Token.allowance(_owner, _spender), 0); } } /// @title OptimismMintableERC20_Mint_Test /// @notice Tests the `mint` function of the `OptimismMintableERC20` contract. 
contract OptimismMintableERC20_Mint_Test is OptimismMintableERC20_TestInit { - function test_mint_succeeds() external { + /// @notice Tests that minting tokens succeeds when called by the bridge. + function testFuzz_mint_fromBridge_succeeds(address _to, uint256 _amount) external { + vm.assume(_to != address(0)); + vm.expectEmit(true, true, true, true); - emit Mint(alice, 100); + emit Mint(_to, _amount); vm.prank(address(l2StandardBridge)); - L2Token.mint(alice, 100); + L2Token.mint(_to, _amount); - assertEq(L2Token.balanceOf(alice), 100); + assertEq(L2Token.balanceOf(_to), _amount); } - function test_mint_notBridge_reverts() external { - // NOT the bridge + /// @notice Tests that minting reverts when called by a non-bridge address. + function testFuzz_mint_notBridge_reverts(address _caller) external { + vm.assume(_caller != address(l2StandardBridge)); + vm.expectRevert("OptimismMintableERC20: only bridge can mint and burn"); - vm.prank(address(alice)); + vm.prank(_caller); L2Token.mint(alice, 100); } } @@ -59,23 +77,28 @@ contract OptimismMintableERC20_Mint_Test is OptimismMintableERC20_TestInit { /// @title OptimismMintableERC20_Burn_Test /// @notice Tests the `burn` function of the `OptimismMintableERC20` contract. contract OptimismMintableERC20_Burn_Test is OptimismMintableERC20_TestInit { - function test_burn_succeeds() external { + /// @notice Tests that burning tokens succeeds when called by the bridge. 
+ function testFuzz_burn_fromBridge_succeeds(address _from, uint256 _amount) external { + vm.assume(_from != address(0)); + vm.prank(address(l2StandardBridge)); - L2Token.mint(alice, 100); + L2Token.mint(_from, _amount); vm.expectEmit(true, true, true, true); - emit Burn(alice, 100); + emit Burn(_from, _amount); vm.prank(address(l2StandardBridge)); - L2Token.burn(alice, 100); + L2Token.burn(_from, _amount); - assertEq(L2Token.balanceOf(alice), 0); + assertEq(L2Token.balanceOf(_from), 0); } - function test_burn_notBridge_reverts() external { - // NOT the bridge + /// @notice Tests that burning reverts when called by a non-bridge address. + function testFuzz_burn_notBridge_reverts(address _caller) external { + vm.assume(_caller != address(l2StandardBridge)); + vm.expectRevert("OptimismMintableERC20: only bridge can mint and burn"); - vm.prank(address(alice)); + vm.prank(_caller); L2Token.burn(alice, 100); } } @@ -83,21 +106,32 @@ contract OptimismMintableERC20_Burn_Test is OptimismMintableERC20_TestInit { /// @title OptimismMintableERC20_SupportsInterface_Test /// @notice Tests the `supportsInterface` function of the `OptimismMintableERC20` contract. contract OptimismMintableERC20_SupportsInterface_Test is OptimismMintableERC20_TestInit { - function test_erc165_supportsInterface_succeeds() external view { + /// @notice Tests that the contract supports ERC165, ILegacyMintableERC20, and + /// IOptimismMintableERC20 interfaces. + function test_supportsInterface_supportedInterfaces_succeeds() external view { // The assertEq calls in this test are comparing the manual calculation of the iface, with // what is returned by the solidity's type().interfaceId, just to be safe. 
bytes4 iface1 = bytes4(keccak256("supportsInterface(bytes4)")); assertEq(iface1, type(IERC165).interfaceId); - assert(L2Token.supportsInterface(iface1)); + assertTrue(L2Token.supportsInterface(iface1)); bytes4 iface2 = L2Token.l1Token.selector ^ L2Token.mint.selector ^ L2Token.burn.selector; assertEq(iface2, type(ILegacyMintableERC20).interfaceId); - assert(L2Token.supportsInterface(iface2)); + assertTrue(L2Token.supportsInterface(iface2)); bytes4 iface3 = L2Token.remoteToken.selector ^ L2Token.bridge.selector ^ L2Token.mint.selector ^ L2Token.burn.selector; assertEq(iface3, type(IOptimismMintableERC20).interfaceId); - assert(L2Token.supportsInterface(iface3)); + assertTrue(L2Token.supportsInterface(iface3)); + } + + /// @notice Tests that the contract returns false for unsupported interfaces. + function testFuzz_supportsInterface_unsupportedInterface_succeeds(bytes4 _interfaceId) external view { + vm.assume(_interfaceId != type(IERC165).interfaceId); + vm.assume(_interfaceId != type(ILegacyMintableERC20).interfaceId); + vm.assume(_interfaceId != type(IOptimismMintableERC20).interfaceId); + + assertFalse(L2Token.supportsInterface(_interfaceId)); } } @@ -133,10 +167,29 @@ contract OptimismMintableERC20_Bridge_Test is OptimismMintableERC20_TestInit { } } +/// @title OptimismMintableERC20_Decimals_Test +/// @notice Tests the `decimals` function of the `OptimismMintableERC20` contract. +contract OptimismMintableERC20_Decimals_Test is OptimismMintableERC20_TestInit { + /// @notice Tests that decimals returns the expected value. + function test_decimals_succeeds() external view { + assertEq(L2Token.decimals(), 18); + } +} + +/// @title OptimismMintableERC20_Version_Test +/// @notice Tests the `version` function of the `OptimismMintableERC20` contract. +contract OptimismMintableERC20_Version_Test is OptimismMintableERC20_TestInit { + /// @notice Tests that version returns a valid semver string. 
+ function test_version_validFormat_succeeds() external view { + SemverComp.parse(L2Token.version()); + } +} + /// @title OptimismMintableERC20_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `OptimismMintableERC20` /// contract. contract OptimismMintableERC20_Uncategorized_Test is OptimismMintableERC20_TestInit { + /// @notice Tests that legacy getters return the expected values. function test_legacy_succeeds() external view { // Getters for the remote token assertEq(L2Token.REMOTE_TOKEN(), address(L1Token)); diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol index 930a1a28676f9..4abbce94bd31b 100644 --- a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol +++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol @@ -10,6 +10,9 @@ import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; import { OptimismMintableERC20 } from "src/universal/OptimismMintableERC20.sol"; import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; +// Libraries +import { SemverComp } from "src/libraries/SemverComp.sol"; + // Interfaces import { IProxy } from "interfaces/universal/IProxy.sol"; import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; @@ -67,7 +70,7 @@ contract OptimismMintableERC20Factory_Initialize_Test is OptimismMintableERC20Fa /// contract. contract OptimismMintableERC20Factory_CreateStandardL2Token_Test is OptimismMintableERC20Factory_TestInit { /// @notice Test that calling `createStandardL2Token` with valid parameters succeeds. 
- function test_createStandardL2Token_succeeds( + function testFuzz_createStandardL2Token_validParams_succeeds( address _caller, address _remoteToken, string memory _name, @@ -100,7 +103,7 @@ contract OptimismMintableERC20Factory_CreateStandardL2Token_Test is OptimismMint /// @notice Test that calling `createOptimismMintableERC20WithDecimals` with valid parameters /// succeeds. - function test_createStandardL2TokenWithDecimals_succeeds( + function testFuzz_createOptimismMintableERC20WithDecimals_validParams_succeeds( address _caller, address _remoteToken, string memory _name, @@ -134,7 +137,7 @@ contract OptimismMintableERC20Factory_CreateStandardL2Token_Test is OptimismMint } /// @notice Test that calling `createStandardL2Token` with the same parameters twice reverts. - function test_createStandardL2Token_sameTwice_reverts( + function testFuzz_createStandardL2Token_sameTwice_reverts( address _caller, address _remoteToken, string memory _name, @@ -156,9 +159,9 @@ contract OptimismMintableERC20Factory_CreateStandardL2Token_Test is OptimismMint l2OptimismMintableERC20Factory.createStandardL2Token(_remoteToken, _name, _symbol); } - /// @notice Test that calling `createStandardL2TokenWithDecimals` with the same parameters - /// twice reverts. - function test_createStandardL2TokenWithDecimals_sameTwice_reverts( + /// @notice Test that calling `createOptimismMintableERC20WithDecimals` with the same + /// parameters twice reverts. + function testFuzz_createOptimismMintableERC20WithDecimals_sameTwice_reverts( address _caller, address _remoteToken, string memory _name, @@ -182,7 +185,7 @@ contract OptimismMintableERC20Factory_CreateStandardL2Token_Test is OptimismMint } /// @notice Test that calling `createStandardL2Token` with a zero remote token address reverts. 
- function test_createStandardL2Token_remoteIsZero_reverts( + function testFuzz_createStandardL2Token_remoteIsZero_reverts( address _caller, string memory _name, string memory _symbol @@ -198,9 +201,9 @@ contract OptimismMintableERC20Factory_CreateStandardL2Token_Test is OptimismMint l2OptimismMintableERC20Factory.createStandardL2Token(remote, _name, _symbol); } - /// @notice Test that calling `createStandardL2TokenWithDecimals` with a zero remote token - /// address reverts. - function test_createStandardL2TokenWithDecimals_remoteIsZero_reverts( + /// @notice Test that calling `createOptimismMintableERC20WithDecimals` with a zero remote + /// token address reverts. + function testFuzz_createOptimismMintableERC20WithDecimals_remoteIsZero_reverts( address _caller, string memory _name, string memory _symbol, @@ -218,6 +221,70 @@ contract OptimismMintableERC20Factory_CreateStandardL2Token_Test is OptimismMint } } +/// @title OptimismMintableERC20Factory_CreateOptimismMintableERC20_Test +/// @notice Tests the `createOptimismMintableERC20` function of the `OptimismMintableERC20Factory` +/// contract. +contract OptimismMintableERC20Factory_CreateOptimismMintableERC20_Test is OptimismMintableERC20Factory_TestInit { + /// @notice Test that calling `createOptimismMintableERC20` with valid parameters succeeds. 
+ function testFuzz_createOptimismMintableERC20_validParams_succeeds( + address _caller, + address _remoteToken, + string memory _name, + string memory _symbol + ) + external + { + // Assume + vm.assume(_remoteToken != address(0)); + + // Arrange + // createOptimismMintableERC20 defaults to 18 decimals + address local = _calculateTokenAddress(_remoteToken, _name, _symbol, 18); + + vm.expectEmit(address(l2OptimismMintableERC20Factory)); + emit StandardL2TokenCreated(_remoteToken, local); + + vm.expectEmit(address(l2OptimismMintableERC20Factory)); + emit OptimismMintableERC20Created(local, _remoteToken, _caller); + + // Act + vm.prank(_caller); + address addr = l2OptimismMintableERC20Factory.createOptimismMintableERC20(_remoteToken, _name, _symbol); + + // Assert + assertTrue(addr == local); + assertTrue(OptimismMintableERC20(local).decimals() == 18); + assertEq(l2OptimismMintableERC20Factory.deployments(local), _remoteToken); + } + + /// @notice Test that calling `createOptimismMintableERC20` with a zero remote token address + /// reverts. + function testFuzz_createOptimismMintableERC20_remoteIsZero_reverts( + address _caller, + string memory _name, + string memory _symbol + ) + external + { + // Arrange + address remote = address(0); + vm.expectRevert("OptimismMintableERC20Factory: must provide remote token address"); + + // Act + vm.prank(_caller); + l2OptimismMintableERC20Factory.createOptimismMintableERC20(remote, _name, _symbol); + } +} + +/// @title OptimismMintableERC20Factory_Version_Test +/// @notice Tests the `version` function of the `OptimismMintableERC20Factory` contract. +contract OptimismMintableERC20Factory_Version_Test is OptimismMintableERC20Factory_TestInit { + /// @notice Tests that version returns a valid semver string. 
+ function test_version_validFormat_succeeds() external view { + SemverComp.parse(l2OptimismMintableERC20Factory.version()); + } +} + /// @title OptimismMintableERC20Factory_Uncategorized_Test /// @notice General tests that are not testing any function directly of the /// `OptimismMintableERC20Factory` contract. diff --git a/packages/contracts-bedrock/test/universal/Proxy.t.sol b/packages/contracts-bedrock/test/universal/Proxy.t.sol index a19cb26d8af52..b3b7b52342e15 100644 --- a/packages/contracts-bedrock/test/universal/Proxy.t.sol +++ b/packages/contracts-bedrock/test/universal/Proxy.t.sol @@ -1,10 +1,17 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Scripts +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +// Libraries import { Bytes32AddressLib } from "@rari-capital/solmate/src/utils/Bytes32AddressLib.sol"; + +// Interfaces import { IProxy } from "interfaces/universal/IProxy.sol"; -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; contract Proxy_SimpleStorage_Harness { mapping(uint256 => uint256) internal store; diff --git a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol index b86196e60bbb5..f81ec40007e15 100644 --- a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol +++ b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol @@ -2,9 +2,12 @@ pragma solidity 0.8.15; // Testing -import { Test } from "forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; import { Proxy_SimpleStorage_Harness } from "test/universal/Proxy.t.sol"; +// Scripts +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + // Interfaces import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; @@ -12,8 +15,6 @@ import { 
IResolvedDelegateProxy } from "interfaces/legacy/IResolvedDelegateProxy.sol"; import { IProxy } from "interfaces/universal/IProxy.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; - /// @title ProxyAdmin_TestInit /// @notice Reusable test initialization for `ProxyAdmin` tests. abstract contract ProxyAdmin_TestInit is Test { diff --git a/packages/contracts-bedrock/test/universal/ReinitializableBase.t.sol b/packages/contracts-bedrock/test/universal/ReinitializableBase.t.sol index 805da5b5fab89..a6d61ec6a3213 100644 --- a/packages/contracts-bedrock/test/universal/ReinitializableBase.t.sol +++ b/packages/contracts-bedrock/test/universal/ReinitializableBase.t.sol @@ -2,7 +2,7 @@ pragma solidity 0.8.15; // Testing -import { Test } from "forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; // Contracts import { ReinitializableBase } from "src/universal/ReinitializableBase.sol"; diff --git a/packages/contracts-bedrock/test/universal/WETH98.t.sol b/packages/contracts-bedrock/test/universal/WETH98.t.sol index 96065c8c89096..8173dec67d3e5 100644 --- a/packages/contracts-bedrock/test/universal/WETH98.t.sol +++ b/packages/contracts-bedrock/test/universal/WETH98.t.sol @@ -2,12 +2,14 @@ pragma solidity 0.8.15; // Testing -import { Test } from "forge-std/Test.sol"; +import { Test } from "test/setup/Test.sol"; -// Contracts -import { IWETH98 } from "interfaces/universal/IWETH98.sol"; +// Scripts import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; +// Interfaces +import { IWETH98 } from "interfaces/universal/IWETH98.sol"; + /// @title WETH98_TestInit /// @notice Reusable test initialization for `WETH98` tests. 
abstract contract WETH98_TestInit is Test { diff --git a/packages/contracts-bedrock/test/vendor/AddressAliasHelper.t.sol b/packages/contracts-bedrock/test/vendor/AddressAliasHelper.t.sol index b803abba9fa90..68d838f692152 100644 --- a/packages/contracts-bedrock/test/vendor/AddressAliasHelper.t.sol +++ b/packages/contracts-bedrock/test/vendor/AddressAliasHelper.t.sol @@ -1,7 +1,10 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { Test } from "forge-std/Test.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Libraries import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; /// @title AddressAliasHelper_ApplyL1ToL2Alias_Test diff --git a/packages/contracts-bedrock/test/vendor/Initializable.t.sol b/packages/contracts-bedrock/test/vendor/Initializable.t.sol index 6bde0b7ccbb78..e74a08deec2cd 100644 --- a/packages/contracts-bedrock/test/vendor/Initializable.t.sol +++ b/packages/contracts-bedrock/test/vendor/Initializable.t.sol @@ -398,7 +398,7 @@ contract Initializer_Test is CommonTest { excludes[j++] = "src/dispute/SuperFaultDisputeGame.sol"; excludes[j++] = "src/dispute/PermissionedDisputeGame.sol"; excludes[j++] = "src/dispute/SuperPermissionedDisputeGame.sol"; - excludes[j++] = "src/dispute/zk/OPSuccinctFaultDisputeGame.sol"; + excludes[j++] = "src/dispute/zk/OptimisticZkGame.sol"; // TODO: Eventually remove this exclusion. Same reason as above dispute contracts. excludes[j++] = "src/L1/OPContractsManager.sol"; // TODO: Eventually remove this exclusion. Same reason as above dispute contracts. 
diff --git a/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol b/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol index b12bade2a9fc9..c8077d79b04d5 100644 --- a/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol +++ b/packages/contracts-bedrock/test/vendor/InitializableOZv5.t.sol @@ -1,13 +1,22 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.25; -import { Test } from "forge-std/Test.sol"; -import { IOptimismSuperchainERC20 } from "interfaces/L2/IOptimismSuperchainERC20.sol"; -import { Initializable } from "@openzeppelin/contracts-v5/proxy/utils/Initializable.sol"; +// Testing +import { Test } from "test/setup/Test.sol"; + +// Scripts import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; -import { IFeeVault } from "interfaces/L2/IFeeVault.sol"; + +// Contracts +import { Initializable } from "@openzeppelin/contracts-v5/proxy/utils/Initializable.sol"; + +// Libraries import { Types } from "src/libraries/Types.sol"; +// Interfaces +import { IOptimismSuperchainERC20 } from "interfaces/L2/IOptimismSuperchainERC20.sol"; +import { IFeeVault } from "interfaces/L2/IFeeVault.sol"; + /// @title InitializerOZv5_Test /// @dev Ensures that the `initialize()` function on contracts cannot be called more than /// once. Tests the contracts inheriting from `Initializable` from OpenZeppelin Contracts v5.