diff --git a/.circleci/config.yml b/.circleci/config.yml index f948e751661..9f28fd8a9b9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -172,14 +172,14 @@ commands: user=$(whoami) echo "$user" > .executor-user echo "Set executor user to $user." - + if [[ "$user" == "root" ]]; then # Self-hosted runners will persist this cache between runs. Cleaning it up means that we # preserve the semantics of the cache regardless of executor type. It's also much faster # to delete the cache and recreate it than it is to overwrite it in place. rm -rf /data/mise-data echo "Cleaned up cache data." - + mkdir -p /data/mise-data echo "Created Mise data dir." mkdir -p ~/.cache @@ -206,7 +206,7 @@ commands: else curl https://mise.run | sh fi - + echo "export PATH=\"$HOME/.local/bin:\$PATH\"" >> "$BASH_ENV" echo "export MISE_DATA_DIR=/data/mise-data" >> "$BASH_ENV" echo "export MISE_JOBS=$(nproc)" >> "$BASH_ENV" @@ -676,8 +676,10 @@ jobs: working_directory: packages/contracts-bedrock contracts-bedrock-tests: - machine: true - resource_class: ethereum-optimism/latitude-1 + circleci_ip_ranges: true + docker: + - image: <> + resource_class: xlarge parameters: test_list: description: List of test files to run @@ -760,8 +762,10 @@ jobs: - notify-failures-on-develop contracts-bedrock-coverage: - machine: true - resource_class: ethereum-optimism/latitude-1 + circleci_ip_ranges: true + docker: + - image: <> + resource_class: 2xlarge parameters: test_flags: description: Additional flags to pass to the test command @@ -798,6 +802,15 @@ jobs: command: | sudo apt-get update sudo apt-get install -y lcov + - run: + name: Write pinned block number for cache key + command: | + just print-pinned-block-number > ./pinnedBlockNumber.txt + cat pinnedBlockNumber.txt + working_directory: packages/contracts-bedrock + - restore_cache: + name: Restore forked state + key: forked-state-contracts-bedrock-tests-upgrade-{{ checksum "packages/contracts-bedrock/pinnedBlockNumber.txt" }} - run: name: Run 
coverage tests command: just coverage-lcov-all <> @@ -846,7 +859,6 @@ jobs: command: | just print-pinned-block-number > ./pinnedBlockNumber.txt cat pinnedBlockNumber.txt - pwd working_directory: packages/contracts-bedrock - restore_cache: name: Restore forked state @@ -910,8 +922,6 @@ jobs: command: validate-deploy-configs - run-contracts-check: command: lint - - run-contracts-check: - command: gas-snapshot-check - run-contracts-check: command: snapshots-check-no-build - run-contracts-check: @@ -952,8 +962,9 @@ jobs: description: should load in foundry artifacts type: boolean default: false - machine: true - resource_class: ethereum-optimism/latitude-1 + docker: + - image: <> + resource_class: xlarge steps: - checkout-with-mise - check-changed: @@ -1063,7 +1074,8 @@ jobs: gotestsum --format=testname \ --junitfile=./tmp/test-results/results.xml \ --jsonfile=./tmp/testlogs/log.json \ - --rerun-fails=2 \ + --rerun-fails=3 \ + --rerun-fails-max-failures=50 \ --packages="$formatted_packages" \ -- -parallel=$PARALLEL -coverprofile=coverage.out -timeout=<> - codecov/upload: @@ -1164,17 +1176,25 @@ jobs: echo "Publishing ${PRESTATE_HASH}, ${PRESTATE_MT64_HASH}, ${PRESTATE_INTEROP_HASH} as ${BRANCH_NAME}" if [[ "" != "<< pipeline.git.branch >>" ]] then + # Upload the git commit info for each prestate since this won't be recorded in releases.json + (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate=${PRESTATE_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}.bin.gz.txt" + (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_MT64_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-mt64.bin.gz.txt" + (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_INTEROP_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-interop.bin.gz.txt" + + # Use the branch name for branches to provide a consistent URL 
PRESTATE_HASH="${BRANCH_NAME}" - PRESTATE_MT64_HASH="${BRANCH_NAME}" - PRESTATE_INTEROP_HASH="${BRANCH_NAME}" + PRESTATE_MT64_HASH="${BRANCH_NAME}-mt64" + PRESTATE_INTEROP_HASH="${BRANCH_NAME}-interop" fi gsutil cp ./op-program/bin/prestate.bin.gz \ "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_HASH}.bin.gz" + gsutil cp ./op-program/bin/prestate-mt64.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_MT64_HASH}-mt64.bin.gz" + "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_MT64_HASH}.bin.gz" + gsutil cp ./op-program/bin/prestate-interop.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_INTEROP_HASH}-interop.bin.gz" + "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_INTEROP_HASH}.bin.gz" - notify-failures-on-develop: mentions: "@proofs-team" @@ -1480,6 +1500,8 @@ workflows: uses_artifacts: true requires: ["contracts-bedrock-build"] - go-tests: + environment_overrides: | + export PARALLEL=48 packages: | op-alt-da op-batcher diff --git a/cannon/mipsevm/exec/mips_instructions.go b/cannon/mipsevm/exec/mips_instructions.go index a5035aaa708..319a7adec89 100644 --- a/cannon/mipsevm/exec/mips_instructions.go +++ b/cannon/mipsevm/exec/mips_instructions.go @@ -211,18 +211,18 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem shiftAmt := (insn >> 6) & 0x1F return SignExtend((rt<>((insn>>6)&0x1F), 32) + return SignExtend((rt&U32Mask)>>((insn>>6)&0x1F), 32) case 0x03: // sra shamt := Word((insn >> 6) & 0x1F) - return SignExtend((rt&0xFFFFFFFF)>>shamt, 32-shamt) + return SignExtend((rt&U32Mask)>>shamt, 32-shamt) case 0x04: // sllv shiftAmt := rs & 0x1F return SignExtend((rt<>(rs&0x1F), 32) + return SignExtend((rt&U32Mask)>>(rs&0x1F), 32) case 0x07: // srav shamt := Word(rs & 0x1F) - return SignExtend((rt&0xFFFFFFFF)>>shamt, 32-shamt) + return SignExtend((rt&U32Mask)>>shamt, 32-shamt) // functs in range [0x8, 0x1b] for 32-bit and [0x8, 0x1f] for 64-bit are 
handled specially by other functions case 0x08: // jr return rs @@ -360,14 +360,14 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem case 0x22: // lwl if arch.IsMips32 { val := mem << ((rs & 3) * 8) - mask := Word(uint32(0xFFFFFFFF) << ((rs & 3) * 8)) - return SignExtend(((rt & ^mask)|val)&0xFFFFFFFF, 32) + mask := Word(uint32(U32Mask) << ((rs & 3) * 8)) + return SignExtend(((rt & ^mask)|val)&U32Mask, 32) } else { // similar to the above mips32 implementation but loads are constrained to the nearest 4-byte memory word w := uint32(SelectSubWord(rs, mem, 4, false)) val := w << ((rs & 3) * 8) - mask := Word(uint32(0xFFFFFFFF) << ((rs & 3) * 8)) - return SignExtend(((rt & ^mask)|Word(val))&0xFFFFFFFF, 32) + mask := Word(uint32(U32Mask) << ((rs & 3) * 8)) + return SignExtend(((rt & ^mask)|Word(val))&U32Mask, 32) } case 0x23: // lw return SelectSubWord(rs, mem, 4, true) @@ -378,13 +378,13 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem case 0x26: // lwr if arch.IsMips32 { val := mem >> (24 - (rs&3)*8) - mask := Word(uint32(0xFFFFFFFF) >> (24 - (rs&3)*8)) - return SignExtend(((rt & ^mask)|val)&0xFFFFFFFF, 32) + mask := Word(uint32(U32Mask) >> (24 - (rs&3)*8)) + return SignExtend(((rt & ^mask)|val)&U32Mask, 32) } else { // similar to the above mips32 implementation but constrained to the nearest 4-byte memory word w := uint32(SelectSubWord(rs, mem, 4, false)) val := w >> (24 - (rs&3)*8) - mask := uint32(0xFFFFFFFF) >> (24 - (rs&3)*8) + mask := uint32(U32Mask) >> (24 - (rs&3)*8) lwrResult := (uint32(rt) & ^mask) | val if rs&3 == 3 { // loaded bit 31 return SignExtend(Word(lwrResult), 32) @@ -401,12 +401,12 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem case 0x2a: // swl if arch.IsMips32 { val := rt >> ((rs & 3) * 8) - mask := uint32(0xFFFFFFFF) >> ((rs & 3) * 8) + mask := uint32(U32Mask) >> ((rs & 3) * 8) return (mem & Word(^mask)) | val } else { sr := (rs & 3) << 3 - val 
:= ((rt & 0xFFFFFFFF) >> sr) << (32 - ((rs & 0x4) << 3)) - mask := (uint64(0xFFFFFFFF) >> sr) << (32 - ((rs & 0x4) << 3)) + val := ((rt & U32Mask) >> sr) << (32 - ((rs & 0x4) << 3)) + mask := (uint64(U32Mask) >> sr) << (32 - ((rs & 0x4) << 3)) return (mem & Word(^mask)) | val } case 0x2b: // sw @@ -414,13 +414,13 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem case 0x2e: // swr if arch.IsMips32 { val := rt << (24 - (rs&3)*8) - mask := uint32(0xFFFFFFFF) << (24 - (rs&3)*8) + mask := uint32(U32Mask) << (24 - (rs&3)*8) return (mem & Word(^mask)) | val } else { // similar to the above mips32 implementation but constrained to the nearest 4-byte memory word w := uint32(SelectSubWord(rs, mem, 4, false)) val := rt << (24 - (rs&3)*8) - mask := uint32(0xFFFFFFFF) << (24 - (rs&3)*8) + mask := uint32(U32Mask) << (24 - (rs&3)*8) swrResult := (w & ^mask) | uint32(val) return UpdateSubWord(rs, mem, 4, Word(swrResult)) } @@ -440,7 +440,7 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem return val | (rt & mask) case 0x27: // lwu assertMips64(insn) - return (mem >> (32 - ((rs & 0x4) << 3))) & 0xFFFFFFFF + return (mem >> (32 - ((rs & 0x4) << 3))) & U32Mask case 0x2C: // sdl assertMips64(insn) sr := (rs & 0x7) << 3 diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index 9f0812c8fd8..c43fb19e237 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -434,6 +434,7 @@ func TestEVM_SingleStep_MfhiMflo(t *testing.T) { } func TestEVM_SingleStep_MulDiv(t *testing.T) { + flip := testutil.FlipSign cases := []mulDivTestCase{ {name: "mul", funct: uint32(0x2), opcode: uint32(28), rs: Word(5), rt: Word(2), rdReg: uint32(0x8), expectRes: Word(10)}, // mul t0, t1, t2 {name: "mul", funct: uint32(0x2), opcode: uint32(28), rs: Word(0x1), rt: ^Word(0), rdReg: uint32(0x8), expectRes: ^Word(0)}, // mul t1, t2 @@ -451,9 +452,17 @@ func 
TestEVM_SingleStep_MulDiv(t *testing.T) { {name: "multu", funct: uint32(0x19), rs: Word(0xFF_FF_FF_D3), rt: Word(0xAA_BB_CC_DD), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0xAA_BB_CC_BE), expectLo: Word(0xFC_FC_FD_27)}, // multu t1, t2 {name: "multu", funct: uint32(0x19), rs: Word(0xFF_FF_FF_D3), rt: Word(0xAA_BB_CC_BE), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(0xAA_BB_CC_9F), expectLo: Word(0xFC_FD_02_9A)}, // multu t1, t2 - {name: "div", funct: uint32(0x1a), rs: Word(5), rt: Word(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(1), expectLo: Word(2)}, // div t1, t2 - {name: "div by zero", funct: uint32(0x1a), rs: Word(5), rt: Word(0), rdReg: uint32(0x0), opcode: uint32(0), panicMsg: "instruction divide by zero", revertMsg: "division by zero"}, // div t1, t2 - {name: "divu", funct: uint32(0x1b), rs: Word(5), rt: Word(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(1), expectLo: Word(2)}, // divu t1, t2 + {name: "div", funct: uint32(0x1a), rs: Word(5), rt: Word(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(1), expectLo: Word(2)}, // div t1, t2 + {name: "div w neg dividend", funct: uint32(0x1a), rs: flip(9), rt: Word(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: flip(1), expectLo: flip(4)}, // div t1, t2 + {name: "div w neg divisor", funct: uint32(0x1a), rs: 9, rt: flip(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: 1, expectLo: flip(4)}, // div t1, t2 + {name: "div w neg operands", funct: uint32(0x1a), rs: flip(9), rt: flip(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: flip(1), expectLo: 4}, // div t1, t2 + {name: "div by zero", funct: uint32(0x1a), rs: Word(5), rt: Word(0), rdReg: uint32(0x0), opcode: uint32(0), panicMsg: "instruction divide by zero", revertMsg: "division by zero"}, // div t1, t2 + {name: "divu", funct: uint32(0x1b), rs: Word(5), rt: Word(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: Word(1), expectLo: Word(2)}, + {name: "divu w neg dividend", funct: uint32(0x1b), 
rs: flip(9), rt: Word(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: 1, expectLo: (flip(9) & exec.U32Mask) >> 1}, // div t1, t2 + {name: "divu w neg divisor", funct: uint32(0x1b), rs: 9, rt: flip(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: 9, expectLo: 0}, // div t1, t2 + {name: "divu w neg divisor #2", funct: uint32(0x1b), rs: 2, rt: flip(9), rdReg: uint32(0x0), opcode: uint32(0), expectHi: 2, expectLo: 0}, // div t1, t2 + {name: "divu w neg operands", funct: uint32(0x1b), rs: flip(9), rt: flip(2), rdReg: uint32(0x0), opcode: uint32(0), expectHi: flip(9), expectLo: 0}, // divu t1, t2 + {name: "divu w neg operands #2", funct: uint32(0x1b), rs: flip(2), rt: flip(9), rdReg: uint32(0x0), opcode: uint32(0), expectHi: 7, expectLo: 1}, // divu t1, t2 {name: "divu by zero", funct: uint32(0x1b), rs: Word(5), rt: Word(0), rdReg: uint32(0x0), opcode: uint32(0), panicMsg: "instruction divide by zero", revertMsg: "division by zero"}, // divu t1, t2 } @@ -557,15 +566,19 @@ func TestEVM_SingleStep_SlSr(t *testing.T) { {name: "sll with sign extension", funct: uint16(4) << 6, rt: Word(0x0800_0000), rsReg: uint32(0x0), expectVal: signExtend64(0x8000_0000)}, {name: "sll with max shift, sign extension", funct: uint16(31) << 6, rt: Word(0x01), rsReg: uint32(0x0), expectVal: signExtend64(0x8000_0000)}, {name: "sll with max shift, overflow", funct: uint16(31) << 6, rt: Word(0x02), rsReg: uint32(0x0), expectVal: 0x0}, - {name: "srl", funct: uint16(4)<<6 | 2, rt: Word(0x20), rsReg: uint32(0x0), expectVal: Word(0x20) >> uint8(4)}, // srl t0, t1, 3 - {name: "sra", funct: uint16(4)<<6 | 3, rt: Word(0x80_00_00_20), rsReg: uint32(0x0), expectVal: signExtend64(0xF8_00_00_02)}, // sra t0, t1, 3 - {name: "sllv", funct: uint16(4), rt: Word(0x20), rs: Word(4), rsReg: uint32(0xa), expectVal: Word(0x20) << Word(4)}, // sllv t0, t1, t2 + {name: "srl", funct: uint16(4)<<6 | 2, rt: Word(0x20), rsReg: uint32(0x0), expectVal: Word(0x20) >> uint8(4)}, // srl t0, t1, 3 + {name: "srl with sign 
extension", funct: uint16(0)<<6 | 2, rt: Word(0x8000_0000), rsReg: uint32(0x0), expectVal: signExtend64(0x8000_0000)}, // srl t0, t1, 3 + {name: "sra", funct: uint16(4)<<6 | 3, rt: Word(0x70_00_00_20), rsReg: uint32(0x0), expectVal: signExtend64(0x07_00_00_02)}, // sra t0, t1, 3 + {name: "sra with sign extension", funct: uint16(4)<<6 | 3, rt: Word(0x80_00_00_20), rsReg: uint32(0x0), expectVal: signExtend64(0xF8_00_00_02)}, // sra t0, t1, 3 + {name: "sllv", funct: uint16(4), rt: Word(0x20), rs: Word(4), rsReg: uint32(0xa), expectVal: Word(0x20) << Word(4)}, // sllv t0, t1, t2 {name: "sllv with overflow", funct: uint16(4), rt: Word(0x8000_0000), rs: Word(1), rsReg: uint32(0xa), expectVal: 0x0}, {name: "sllv with sign extension", funct: uint16(4), rt: Word(0x0800_0000), rs: Word(4), rsReg: uint32(0xa), expectVal: signExtend64(0x8000_0000)}, {name: "sllv with max shift, sign extension", funct: uint16(4), rt: Word(0x01), rs: Word(31), rsReg: uint32(0xa), expectVal: signExtend64(0x8000_0000)}, {name: "sllv with max shift, overflow", funct: uint16(4), rt: Word(0x02), rs: Word(31), rsReg: uint32(0xa), expectVal: 0x0}, - {name: "srlv", funct: uint16(6), rt: Word(0x20_00), rs: Word(4), rsReg: uint32(0xa), expectVal: Word(0x20_00) >> Word(4)}, // srlv t0, t1, t2 - {name: "srav", funct: uint16(7), rt: Word(0xdeafbeef), rs: Word(12), rsReg: uint32(0xa), expectVal: signExtend64(Word(0xfffdeafb))}, // srav t0, t1, t2 + {name: "srlv", funct: uint16(6), rt: Word(0x20_00), rs: Word(4), rsReg: uint32(0xa), expectVal: Word(0x20_00) >> Word(4)}, // srlv t0, t1, t2 + {name: "srlv with sign extension", funct: uint16(6), rt: Word(0x8000_0000), rs: Word(0), rsReg: uint32(0xa), expectVal: signExtend64(0x8000_0000)}, // srlv t0, t1, t2 + {name: "srav", funct: uint16(7), rt: Word(0x1deafbee), rs: Word(12), rsReg: uint32(0xa), expectVal: signExtend64(Word(0x0001deaf))}, // srav t0, t1, t2 + {name: "srav with sign extension", funct: uint16(7), rt: Word(0xdeafbeef), rs: Word(12), rsReg: 
uint32(0xa), expectVal: signExtend64(Word(0xfffdeafb))}, // srav t0, t1, t2 } for _, v := range versions { diff --git a/cannon/mipsevm/testutil/arch.go b/cannon/mipsevm/testutil/arch.go index 7c557419895..8336212323f 100644 --- a/cannon/mipsevm/testutil/arch.go +++ b/cannon/mipsevm/testutil/arch.go @@ -70,3 +70,8 @@ func Cannon32OnlyTest(t testing.TB, msg string, args ...any) { t.Skipf(msg, args...) } } + +// FlipSign flips the sign of a 2's complement Word +func FlipSign(val Word) Word { + return ^val + 1 +} diff --git a/docs/handbook/pr-guidelines.md b/docs/handbook/pr-guidelines.md index 57913ebeaa5..7403eb76b19 100644 --- a/docs/handbook/pr-guidelines.md +++ b/docs/handbook/pr-guidelines.md @@ -34,6 +34,21 @@ This is organized by current state of PR, so it can be easily referenced frequen - **Explain Decisions/Tradeoffs**: Explain rationale for any design/architecture decisions and implementation details in the PR description. If it closes an issue, remember to mention the issue it closes, e.g. `Closes `. Otherwise, just link to the issue. If there is no issue, whatever details would have been in the issue should be in the PR description. - **Guide PR reviewers:** Let them know about areas of concern, under-tested areas, or vague requirements that should be ironed out. +### Triggering CI on PRs from external forks +If the PR is from an external fork, our CI suite will not automatically run on the PR. A reviewer with sufficient permissions (e.g. the automatically assigened reviewer) needs to comment on the PR wih + +> /ci authorize COMMITHASH + +or + +> /ci authorize https://github.com/ethereum-optimism/optimism/pull/PR_NUMBER/commits/COMMITHASH + +to trigger the CI suite to run. CI is a precondition for merging the PR and should be done before review is conducted, because it will reveal any failing tests or other problems such as linting errors. 
+ +> [!NOTE] +> COMMITHASH and PR_NUMBER have their usual meanings but you must use the **full** commit hash and not a shortened version. Otherwise CI will not be triggered. + + ### Reviewing PRs - **Verify Requirements are Met**: If the PR claims to fix or close an issue, check that all the requirements in the issue are actually met. Otherwise the issue may be in a good place to merge, but just shouldn’t close the issue. diff --git a/go.mod b/go.mod index ad55be65ef5..dc7be7f0ff8 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/btcsuite/btcd v0.24.2 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/cockroachdb/pebble v1.1.4 - github.com/consensys/gnark-crypto v0.15.0 + github.com/consensys/gnark-crypto v0.16.0 github.com/crate-crypto/go-kzg-4844 v1.1.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 diff --git a/go.sum b/go.sum index 3133044f613..9a81dbcceb4 100644 --- a/go.sum +++ b/go.sum @@ -126,8 +126,8 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAK github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs= github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= -github.com/consensys/gnark-crypto v0.15.0 h1:OXsWnhheHV59eXIzhL5OIexa/vqTK8wtRYQCtwfMDtY= -github.com/consensys/gnark-crypto v0.15.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU= +github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo= +github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= 
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= diff --git a/op-chain-ops/genesis/genesis.go b/op-chain-ops/genesis/genesis.go index 4be4866aa5c..360e9af9718 100644 --- a/op-chain-ops/genesis/genesis.go +++ b/op-chain-ops/genesis/genesis.go @@ -70,6 +70,7 @@ func NewL2Genesis(config *DeployConfig, l1StartHeader *types.Header) (*core.Gene GraniteTime: config.GraniteTime(l1StartTime), HoloceneTime: config.HoloceneTime(l1StartTime), IsthmusTime: config.IsthmusTime(l1StartTime), + PragueTime: config.IsthmusTime(l1StartTime), InteropTime: config.InteropTime(l1StartTime), Optimism: ¶ms.OptimismConfig{ EIP1559Denominator: eip1559Denom, diff --git a/op-chain-ops/interopgen/recipe.go b/op-chain-ops/interopgen/recipe.go index 56251d65c34..f88a2630294 100644 --- a/op-chain-ops/interopgen/recipe.go +++ b/op-chain-ops/interopgen/recipe.go @@ -237,6 +237,7 @@ func InteropL2DevConfig(l1ChainID, l2ChainID uint64, addrs devkeys.Addresses) (* L2GenesisIsthmusTimeOffset: new(hexutil.Uint64), L2GenesisInteropTimeOffset: new(hexutil.Uint64), L1CancunTimeOffset: new(hexutil.Uint64), + L1PragueTimeOffset: new(hexutil.Uint64), UseInterop: true, }, L2CoreDeployConfig: genesis.L2CoreDeployConfig{ diff --git a/op-challenger/runner/prestates.go b/op-challenger/runner/prestates.go index ecbf19dbd89..19e3d40933d 100644 --- a/op-challenger/runner/prestates.go +++ b/op-challenger/runner/prestates.go @@ -109,6 +109,32 @@ func (f *NamedPrestateFetcher) getPrestate(ctx context.Context, logger log.Logge if err != nil { return "", fmt.Errorf("invalid prestate file %v: %w", f.filename, err) } - logger.Info("Downloaded named prestate", "filename", f.filename, "prestate", proof.ClaimValue) + + metadata, err := f.getPrestateMetadata(ctx, prestateBaseUrl) + if err != nil { + logger.Warn("Metadata unavailable for prestate", "prestate", f.filename, "err", err) + } + logger.Info("Downloaded named prestate", "filename", f.filename, "prestate", proof.ClaimValue, 
"metadata", metadata) return targetFile, nil } + +func (f *NamedPrestateFetcher) getPrestateMetadata(ctx context.Context, prestateBaseUrl *url.URL) (string, error) { + gitInfoUrl := prestateBaseUrl.JoinPath(f.filename + ".txt") + req, err := http.NewRequestWithContext(ctx, "GET", gitInfoUrl.String(), nil) + if err != nil { + return "", fmt.Errorf("failed to create prestate metadata request: %w", err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return "", fmt.Errorf("failed to fetch prestate metadata from %v: %w", gitInfoUrl, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("prestate metadata from url %v: status %v", gitInfoUrl, resp.StatusCode) + } + data, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("failed to read metadata from %v: %w", gitInfoUrl, err) + } + return string(data), nil +} diff --git a/op-deployer/pkg/deployer/upgrade/v2_0_0/testdata/config.json b/op-deployer/pkg/deployer/upgrade/v2_0_0/testdata/config.json index 2b0111674da..5f66fec5635 100644 --- a/op-deployer/pkg/deployer/upgrade/v2_0_0/testdata/config.json +++ b/op-deployer/pkg/deployer/upgrade/v2_0_0/testdata/config.json @@ -1,10 +1,11 @@ { "prank": "0x1Eb2fFc903729a0F03966B917003800b145F56E2", - "opcm": "0x0c9efe47eac86ee9868dda15c9c584025a7de1d0", + "opcm": "0xaf334f4537e87f5155d135392ff6d52f1866465e", "chainConfigs": [ { "systemConfigProxy": "0x034edD2A225f7f429A63E0f1D2084B9E0A93b538", - "proxyAdmin": "0x189aBAAaa82DfC015A588A7dbaD6F13b1D3485Bc" + "proxyAdmin": "0x189aBAAaa82DfC015A588A7dbaD6F13b1D3485Bc", + "absolutePrestate": "0x0000000000000000000000000000000000000000000000000000000000000abc" } ] } \ No newline at end of file diff --git a/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade.go b/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade.go index 97fe565545c..579fbe3b7c9 100644 --- a/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade.go +++ b/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade.go 
@@ -19,9 +19,10 @@ type UpgradeOPChainInput struct { type OPChainConfig struct { SystemConfigProxy common.Address `json:"systemConfigProxy"` ProxyAdmin common.Address `json:"proxyAdmin"` + AbsolutePrestate common.Hash `json:"absolutePrestate"` } -var opChainConfigEncoder = w3.MustNewFunc("dummy((address systemConfigProxy,address proxyAdmin)[])", "") +var opChainConfigEncoder = w3.MustNewFunc("dummy((address systemConfigProxy,address proxyAdmin,bytes32 absolutePrestate)[])", "") func (u *UpgradeOPChainInput) OpChainConfigs() ([]byte, error) { data, err := opChainConfigEncoder.EncodeArgs(u.EncodedChainConfigs) diff --git a/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade_test.go b/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade_test.go index 1074fba9c9d..6e5aef0769f 100644 --- a/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade_test.go +++ b/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade_test.go @@ -27,10 +27,12 @@ func TestUpgradeOPChainInput_OpChainConfigs(t *testing.T) { { SystemConfigProxy: common.Address{0x01}, ProxyAdmin: common.Address{0x02}, + AbsolutePrestate: common.Hash{0x03}, }, { SystemConfigProxy: common.Address{0x04}, ProxyAdmin: common.Address{0x05}, + AbsolutePrestate: common.Hash{0x06}, }, }, } @@ -42,8 +44,10 @@ func TestUpgradeOPChainInput_OpChainConfigs(t *testing.T) { "0000000000000000000000000000000000000000000000000000000000000002"+ "0000000000000000000000000100000000000000000000000000000000000000"+ "0000000000000000000000000200000000000000000000000000000000000000"+ + "0300000000000000000000000000000000000000000000000000000000000000"+ "0000000000000000000000000400000000000000000000000000000000000000"+ - "0000000000000000000000000500000000000000000000000000000000000000", + "0000000000000000000000000500000000000000000000000000000000000000"+ + "0600000000000000000000000000000000000000000000000000000000000000", hex.EncodeToString(data), ) } @@ -99,7 +103,7 @@ func TestUpgrader_Upgrade(t *testing.T) { { To: &addr, Data: []byte{ - 0x5d, 0x4e, 0xfc, 
0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0x2d, 0xd5, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -109,7 +113,9 @@ func TestUpgrader_Upgrade(t *testing.T) { 0xf1, 0xd2, 0x08, 0x4b, 0x9e, 0x0a, 0x93, 0xb5, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x9a, 0xba, 0xaa, 0xa8, 0x2d, 0xfc, 0x01, 0x5a, 0x58, 0x8a, 0x7d, 0xba, 0xd6, 0xf1, 0x3b, 0x1d, 0x34, - 0x85, 0xbc, + 0x85, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa, 0xbc, }, Value: (*hexutil.Big)(common.Big0), }, diff --git a/op-e2e/actions/interop/dsl.go b/op-e2e/actions/interop/dsl.go new file mode 100644 index 00000000000..89ec80a60a4 --- /dev/null +++ b/op-e2e/actions/interop/dsl.go @@ -0,0 +1,287 @@ +package interop + +import ( + "context" + "time" + + "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/event" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +type ChainOpts struct { + Chains []*Chain +} + +func (c *ChainOpts) SetChains(chains ...*Chain) { + c.Chains = chains +} + +func (c *ChainOpts) AddChain(chain *Chain) { + c.Chains = append(c.Chains, chain) +} + +// InteropDSL provides a high-level API to drive interop action tests so that the actual test reads more declaratively +// and is separated from the details of how each action is actually executed. 
+// DSL methods will typically: +// 1. Check (and if needed, wait) for any required preconditions +// 2. Perform the action, allowing components to fully process the effects of it +// 3. Assert that the action completed. These are intended to be a sanity check to ensure tests fail fast if something +// doesn't work as expected. Options may be provided to perform more detailed or specific assertions +// +// Optional inputs can be used to control lower level details of the operation. While it is also possible to directly +// access the Actors and low level actions, this should only be required when specifically testing low level details of +// that functionality. It is generally preferable to use optional inputs to the DSL methods to achieve the desired result +// rather than having to use the low level APIs directly. +// +// Methods may also be provided specifically to verify some state. +// Methods may return some data from the system (e.g. OutputRootAtTimestamp) but it is generally preferred to provide +// an assertion method rather than a getter where that is viable. Assertion methods allow the DSL to provide more helpful +// information in failure messages and ensure the comparison is done correctly and consistently across tests rather than +// duplicating the assertion code in many tests. +// +// Required inputs to methods are specified as normal parameters, so type checking enforces their presence. +// Optional inputs to methods are specified by a config struct and accept a vararg of functions that can update that struct. +// This is roughly inline with the typical opts pattern in Golang but with significantly reduced boilerplate code since +// so many methods wil define their own config. With* methods are only provided for the most common optional args and +// tests will normally supply a custom function that sets all the optional values they need at once. +// Common options can be extracted to a reusable struct (e.g. 
ChainOpts above) which may expose helper methods to aid +// test readability and reduce boilerplate. +type InteropDSL struct { + t helpers.Testing + Actors *InteropActors + SuperRootSource *SuperRootSource + setup *InteropSetup + + // allChains contains all chains in the interop set. + // Currently this is always two chains, but as the setup code becomes more flexible it could be more + // and likely this array would be replaced by something in InteropActors + allChains []*Chain + createdUsers uint64 +} + +func NewInteropDSL(t helpers.Testing) *InteropDSL { + setup := SetupInterop(t) + actors := setup.CreateActors() + + t.Logf("ChainA: %v, ChainB: %v", actors.ChainA.ChainID, actors.ChainB.ChainID) + + allChains := []*Chain{actors.ChainA, actors.ChainB} + + // Get all the initial events processed + for _, chain := range allChains { + chain.Sequencer.ActL2PipelineFull(t) + chain.Sequencer.SyncSupervisor(t) + } + actors.Supervisor.ProcessFull(t) + + superRootSource, err := NewSuperRootSource( + t.Ctx(), + actors.ChainA.Sequencer.RollupClient(), + actors.ChainB.Sequencer.RollupClient()) + require.NoError(t, err) + + return &InteropDSL{ + t: t, + Actors: actors, + SuperRootSource: superRootSource, + setup: setup, + + allChains: allChains, + } +} + +func (d *InteropDSL) defaultChainOpts() ChainOpts { + return ChainOpts{ + // Defensive copy to make sure the original slice isn't modified + Chains: append([]*Chain{}, d.allChains...), + } +} + +func (d *InteropDSL) CreateUser() *DSLUser { + keyIndex := d.createdUsers + d.createdUsers++ + return &DSLUser{ + t: d.t, + index: keyIndex, + keys: d.setup.Keys, + } +} + +func (d *InteropDSL) SuperRoot(timestamp uint64) eth.Super { + ctx, cancel := context.WithTimeout(d.t.Ctx(), 30*time.Second) + defer cancel() + root, err := d.SuperRootSource.CreateSuperRoot(ctx, timestamp) + require.NoError(d.t, err) + return root +} + +func (d *InteropDSL) OutputRootAtTimestamp(chain *Chain, timestamp uint64) *eth.OutputResponse { + ctx, cancel 
:= context.WithTimeout(d.t.Ctx(), 30*time.Second) + defer cancel() + blockNum, err := chain.RollupCfg.TargetBlockNumber(timestamp) + require.NoError(d.t, err) + output, err := chain.Sequencer.RollupClient().OutputAtBlock(ctx, blockNum) + require.NoError(d.t, err) + return output +} + +type TransactionCreator func(chain *Chain) (*types.Transaction, common.Address) +type AddL2BlockOpts struct { + BlockIsNotCrossSafe bool + TransactionCreators []TransactionCreator +} + +func WithL2BlockTransactions(mkTxs ...TransactionCreator) func(*AddL2BlockOpts) { + return func(o *AddL2BlockOpts) { + o.TransactionCreators = mkTxs + } +} + +// AddL2Block adds a new unsafe block to the specified chain and fully processes it in the supervisor +func (d *InteropDSL) AddL2Block(chain *Chain, optionalArgs ...func(*AddL2BlockOpts)) { + opts := AddL2BlockOpts{} + for _, arg := range optionalArgs { + arg(&opts) + } + priorSyncStatus := chain.Sequencer.SyncStatus() + chain.Sequencer.ActL2StartBlock(d.t) + for _, creator := range opts.TransactionCreators { + tx, from := creator(chain) + err := chain.SequencerEngine.EngineApi.IncludeTx(tx, from) + require.NoError(d.t, err) + } + chain.Sequencer.ActL2EndBlock(d.t) + chain.Sequencer.SyncSupervisor(d.t) + d.Actors.Supervisor.ProcessFull(d.t) + chain.Sequencer.ActL2PipelineFull(d.t) + + status := chain.Sequencer.SyncStatus() + expectedBlockNum := priorSyncStatus.UnsafeL2.Number + 1 + require.Equal(d.t, expectedBlockNum, status.UnsafeL2.Number, "Unsafe head did not advance") + if opts.BlockIsNotCrossSafe { + require.Equal(d.t, priorSyncStatus.CrossUnsafeL2.Number, status.CrossUnsafeL2.Number, "CrossUnsafe head advanced unexpectedly") + } else { + require.Equal(d.t, expectedBlockNum, status.CrossUnsafeL2.Number, "CrossUnsafe head did not advance") + } +} + +type SubmitBatchDataOpts struct { + ChainOpts + SkipCrossSafeUpdate bool +} + +// SubmitBatchData submits batch data to L1 and processes the new L1 blocks, advancing the safe heads. 
+// By default, submits all batch data for all chains. +func (d *InteropDSL) SubmitBatchData(optionalArgs ...func(*SubmitBatchDataOpts)) { + opts := SubmitBatchDataOpts{ + ChainOpts: d.defaultChainOpts(), + } + for _, arg := range optionalArgs { + arg(&opts) + } + txInclusion := make([]helpers.Action, 0, len(opts.Chains)) + for _, chain := range opts.Chains { + chain.Batcher.ActSubmitAll(d.t) + txInclusion = append(txInclusion, d.Actors.L1Miner.ActL1IncludeTx(chain.BatcherAddr)) + } + d.AdvanceL1(func(l1Opts *AdvanceL1Opts) { + l1Opts.TxInclusion = txInclusion + }) + + // Verify the local safe head advanced on each chain + for _, chain := range opts.Chains { + status := chain.Sequencer.SyncStatus() + require.Equalf(d.t, status.UnsafeL2, status.LocalSafeL2, "Chain %v did not fully advance local safe head", chain.ChainID) + + // Ingest the new local-safe event + chain.Sequencer.SyncSupervisor(d.t) + } + + if !opts.SkipCrossSafeUpdate { + d.ProcessCrossSafe(func(o *ProcessCrossSafeOpts) { + o.Chains = opts.Chains + }) + } +} + +type ProcessCrossSafeOpts struct { + ChainOpts +} + +// ProcessCrossSafe processes events in the supervisor and nodes to ensure the cross-safe head is fully updated. 
+func (d *InteropDSL) ProcessCrossSafe(optionalArgs ...func(*ProcessCrossSafeOpts)) { + opts := ProcessCrossSafeOpts{ + ChainOpts: d.defaultChainOpts(), + } + for _, arg := range optionalArgs { + arg(&opts) + } + // Process cross-safe updates + d.Actors.Supervisor.ProcessFull(d.t) + + // Process updates on each chain and verify the cross-safe head advanced + for _, chain := range opts.Chains { + chain.Sequencer.ActL2PipelineFull(d.t) + status := chain.Sequencer.SyncStatus() + require.Equalf(d.t, status.UnsafeL2, status.SafeL2, "Chain %v did not fully advance safe head", chain.ChainID) + + chain.Sequencer.SyncSupervisor(d.t) + } + + // Re-run in case there was an invalid block that was replaced so it can now be considered safe + // TODO: Should this just loop until the cross safe heads stop updating or is once enough? + d.Actors.Supervisor.ProcessFull(d.t) + // Process updates on each chain and verify the cross-safe head advanced + for _, chain := range opts.Chains { + chain.Sequencer.ActL2PipelineFull(d.t) + status := chain.Sequencer.SyncStatus() + require.Equalf(d.t, status.UnsafeL2, status.SafeL2, "Chain %v did not fully advance safe head", chain.ChainID) + } +} + +type AdvanceL1Opts struct { + ChainOpts + L1BlockTimeSeconds uint64 + TxInclusion []helpers.Action +} + +// AdvanceL1 adds a new L1 block with the specified transactions and ensures it is processed by the specified chains +// and the supervisor. 
+func (d *InteropDSL) AdvanceL1(optionalArgs ...func(*AdvanceL1Opts)) { + opts := AdvanceL1Opts{ + ChainOpts: d.defaultChainOpts(), + L1BlockTimeSeconds: 12, + } + for _, arg := range optionalArgs { + arg(&opts) + } + expectedL1BlockNum := d.Actors.L1Miner.L1Chain().CurrentBlock().Number.Uint64() + 1 + d.Actors.L1Miner.ActL1StartBlock(opts.L1BlockTimeSeconds)(d.t) + for _, txInclusion := range opts.TxInclusion { + txInclusion(d.t) + } + d.Actors.L1Miner.ActL1EndBlock(d.t) + newBlock := eth.InfoToL1BlockRef(eth.HeaderBlockInfo(d.Actors.L1Miner.L1Chain().CurrentBlock())) + require.Equal(d.t, expectedL1BlockNum, newBlock.Number, "L1 head did not advance") + d.Actors.Supervisor.SignalLatestL1(d.t) + + // The node will exhaust L1 data, it needs the supervisor to see the L1 block first, and provide it to the node. + for _, chain := range opts.Chains { + chain.Sequencer.ActL2EventsUntil(d.t, event.Is[derive.ExhaustedL1Event], 100, false) + chain.Sequencer.SyncSupervisor(d.t) + chain.Sequencer.ActL2PipelineFull(d.t) + chain.Sequencer.ActL1HeadSignal(d.t) + } + + // Verify that the new L1 block was processed everywhere + for _, chain := range opts.Chains { + status := chain.Sequencer.SyncStatus() + require.Equalf(d.t, newBlock, status.HeadL1, "Chain %v did not detect new L1 head", chain.ChainID) + require.Equalf(d.t, newBlock, status.CurrentL1, "Chain %v did not process new L1 head", chain.ChainID) + } +} diff --git a/op-e2e/actions/interop/dsl_user.go b/op-e2e/actions/interop/dsl_user.go new file mode 100644 index 00000000000..2ec02bc70c7 --- /dev/null +++ b/op-e2e/actions/interop/dsl_user.go @@ -0,0 +1,29 @@ +package interop + +import ( + "math/big" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" + 
"github.com/stretchr/testify/require" +) + +type DSLUser struct { + t helpers.Testing + index uint64 + keys devkeys.Keys +} + +func (u *DSLUser) TransactOpts(chain *Chain) (*bind.TransactOpts, common.Address) { + privKey, err := u.keys.Secret(devkeys.ChainUserKeys(chain.ChainID.ToBig())(u.index)) + require.NoError(u.t, err) + opts, err := bind.NewKeyedTransactorWithChainID(privKey, chain.ChainID.ToBig()) + require.NoError(u.t, err) + opts.GasTipCap = big.NewInt(params.GWei) + + return opts, crypto.PubkeyToAddress(privKey.PublicKey) +} diff --git a/op-e2e/actions/interop/emitter.go b/op-e2e/actions/interop/emitter.go new file mode 100644 index 00000000000..49cd9e0fa1b --- /dev/null +++ b/op-e2e/actions/interop/emitter.go @@ -0,0 +1,49 @@ +package interop + +import ( + "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/interop/contracts/bindings/emit" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +type EmitterContract struct { + t helpers.Testing + bindings *emit.Emit + address common.Address + + EmittedMessages []*GeneratedTransaction +} + +func NewEmitterContract(t helpers.Testing) *EmitterContract { + return &EmitterContract{ + t: t, + } +} + +func (c *EmitterContract) Deploy(user *DSLUser) TransactionCreator { + return func(chain *Chain) (*types.Transaction, common.Address) { + opts, from := user.TransactOpts(chain) + emitContract, tx, emitBindings, err := emit.DeployEmit(opts, chain.SequencerEngine.EthClient()) + require.NoError(c.t, err) + c.bindings = emitBindings + c.address = emitContract + return tx, from + } +} + +func (c *EmitterContract) EmitMessage(user *DSLUser, message string) TransactionCreator { + return func(chain *Chain) (*types.Transaction, common.Address) { + opts, from := user.TransactOpts(chain) + tx, err := c.bindings.EmitData(opts, []byte(message)) + require.NoError(c.t, err) + 
c.EmittedMessages = append(c.EmittedMessages, NewGeneratedTransaction(c.t, chain, tx)) + return tx, from + } +} + +func (c *EmitterContract) LastEmittedMessage() *GeneratedTransaction { + require.NotZero(c.t, c.EmittedMessages, "no messages have been emitted") + return c.EmittedMessages[len(c.EmittedMessages)-1] +} diff --git a/op-e2e/actions/interop/emitter_contract_test.go b/op-e2e/actions/interop/emitter_contract_test.go index a0b407bdc44..af7061ee9e4 100644 --- a/op-e2e/actions/interop/emitter_contract_test.go +++ b/op-e2e/actions/interop/emitter_contract_test.go @@ -225,8 +225,8 @@ func includeTxOnChain(t helpers.Testing, actors *InteropActors, chain *Chain, tx func assertHeads(t helpers.Testing, chain *Chain, unsafe, localSafe, crossUnsafe, safe uint64) { status := chain.Sequencer.SyncStatus() - require.Equal(t, unsafe, status.UnsafeL2.ID().Number) - require.Equal(t, crossUnsafe, status.CrossUnsafeL2.ID().Number) - require.Equal(t, localSafe, status.LocalSafeL2.ID().Number) - require.Equal(t, safe, status.SafeL2.ID().Number) + require.Equal(t, unsafe, status.UnsafeL2.ID().Number, "Unsafe") + require.Equal(t, crossUnsafe, status.CrossUnsafeL2.ID().Number, "Cross Unsafe") + require.Equal(t, localSafe, status.LocalSafeL2.ID().Number, "Local safe") + require.Equal(t, safe, status.SafeL2.ID().Number, "Safe") } diff --git a/op-e2e/actions/interop/inbox.go b/op-e2e/actions/interop/inbox.go new file mode 100644 index 00000000000..87ff93e0a52 --- /dev/null +++ b/op-e2e/actions/interop/inbox.go @@ -0,0 +1,40 @@ +package interop + +import ( + "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/interop/contracts/bindings/inbox" + "github.com/ethereum-optimism/optimism/op-service/predeploys" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +type InboxContract struct { + t 
helpers.Testing + + Transactions []*GeneratedTransaction +} + +func NewInboxContract(t helpers.Testing) *InboxContract { + return &InboxContract{ + t: t, + } +} + +func (i *InboxContract) Execute(user *DSLUser, id inbox.Identifier, msg []byte) TransactionCreator { + return func(chain *Chain) (*types.Transaction, common.Address) { + opts, from := user.TransactOpts(chain) + contract, err := inbox.NewInbox(predeploys.CrossL2InboxAddr, chain.SequencerEngine.EthClient()) + require.NoError(i.t, err) + tx, err := contract.ValidateMessage(opts, id, crypto.Keccak256Hash(msg)) + require.NoError(i.t, err) + i.Transactions = append(i.Transactions, NewGeneratedTransaction(i.t, chain, tx)) + return tx, from + } +} + +func (i *InboxContract) LastTransaction() *GeneratedTransaction { + require.NotZero(i.t, i.Transactions, "no transactions created") + return i.Transactions[len(i.Transactions)-1] +} diff --git a/op-e2e/actions/interop/interop_test.go b/op-e2e/actions/interop/interop_test.go index 720b1185c97..be743ebdb5c 100644 --- a/op-e2e/actions/interop/interop_test.go +++ b/op-e2e/actions/interop/interop_test.go @@ -2,32 +2,21 @@ package interop import ( "context" - "fmt" - "log/slog" "math/big" "testing" "github.com/stretchr/testify/require" - "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/super" - challengerTypes "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" - fpHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/proofs/helpers" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/interop/contracts/bindings/inbox" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/event" "github.com/ethereum-optimism/optimism/op-node/rollup/interop/managed" - 
"github.com/ethereum-optimism/optimism/op-program/client/claim" - "github.com/ethereum-optimism/optimism/op-program/client/interop" - "github.com/ethereum-optimism/optimism/op-program/client/interop/types" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/predeploys" - "github.com/ethereum-optimism/optimism/op-service/testlog" - gethTypes "github.com/ethereum/go-ethereum/core/types" ) func TestFullInterop(gt *testing.T) { @@ -74,6 +63,10 @@ func TestFullInterop(gt *testing.T) { require.Equal(t, uint64(0), status.LocalSafeL2.Number) require.Equal(t, uint64(0), status.SafeL2.Number) require.Equal(t, uint64(0), status.FinalizedL2.Number) + supervisorStatus, err := actors.Supervisor.SyncStatus() + require.NoError(t, err) + require.Equal(t, head, supervisorStatus.Chains[actors.ChainA.ChainID].LocalUnsafe.ID()) + require.Equal(t, uint64(0), supervisorStatus.MinSyncedL1.Number) // Submit the L2 block, sync the local-safe data actors.ChainA.Batcher.ActSubmitAll(t) @@ -97,6 +90,10 @@ func TestFullInterop(gt *testing.T) { require.Equal(t, head, status.LocalSafeL2.ID()) require.Equal(t, uint64(0), status.SafeL2.Number) require.Equal(t, uint64(0), status.FinalizedL2.Number) + supervisorStatus, err = actors.Supervisor.SyncStatus() + require.NoError(t, err) + require.Equal(t, head, supervisorStatus.Chains[actors.ChainA.ChainID].LocalUnsafe.ID()) + require.Equal(t, uint64(0), supervisorStatus.MinSyncedL1.Number) // Local-safe does not count as "safe" in RPC n := actors.ChainA.SequencerEngine.L2Chain().CurrentSafeBlock().Number.Uint64() require.Equal(t, uint64(0), n) @@ -116,6 +113,10 @@ func TestFullInterop(gt *testing.T) { require.Equal(t, head, status.LocalSafeL2.ID()) require.Equal(t, head, status.SafeL2.ID()) require.Equal(t, uint64(0), status.FinalizedL2.Number) + supervisorStatus, err = actors.Supervisor.SyncStatus() + require.NoError(t, err) + require.Equal(t, head, 
supervisorStatus.Chains[actors.ChainA.ChainID].LocalUnsafe.ID()) + require.Equal(t, uint64(1), supervisorStatus.MinSyncedL1.Number) h := actors.ChainA.SequencerEngine.L2Chain().CurrentSafeBlock().Hash() require.Equal(t, head.Hash, h) @@ -141,6 +142,10 @@ func TestFullInterop(gt *testing.T) { require.Equal(t, head, status.LocalSafeL2.ID()) require.Equal(t, head, status.SafeL2.ID()) require.Equal(t, head, status.FinalizedL2.ID()) + supervisorStatus, err = actors.Supervisor.SyncStatus() + require.NoError(t, err) + require.Equal(t, head, supervisorStatus.Chains[actors.ChainA.ChainID].LocalUnsafe.ID()) + require.Equal(t, uint64(1), supervisorStatus.MinSyncedL1.Number) } // TestFinality confirms that when L1 finality is updated on the supervisor, @@ -351,669 +356,3 @@ func TestInteropLocalSafeInvalidation(gt *testing.T) { status = actors.ChainB.Sequencer.SyncStatus() require.Equal(t, uint64(2), status.SafeL2.Number) } - -func TestInteropFaultProofs(gt *testing.T) { - t := helpers.NewDefaultTesting(gt) - - is := SetupInterop(t) - actors := is.CreateActors() - - // get both sequencers set up - actors.ChainA.Sequencer.ActL2PipelineFull(t) - actors.ChainB.Sequencer.ActL2PipelineFull(t) - - // sync the supervisor, handle initial events emitted by the nodes - actors.ChainA.Sequencer.SyncSupervisor(t) - actors.ChainB.Sequencer.SyncSupervisor(t) - - // No blocks yet - status := actors.ChainA.Sequencer.SyncStatus() - require.Equal(t, uint64(0), status.UnsafeL2.Number) - - // sync chain A and B - actors.Supervisor.ProcessFull(t) - - // Build L2 block on chain A - actors.ChainA.Sequencer.ActL2StartBlock(t) - actors.ChainA.Sequencer.ActL2EndBlock(t) - require.Equal(t, uint64(1), actors.ChainA.Sequencer.L2Unsafe().Number) - - // Build L2 block on chain B - actors.ChainB.Sequencer.ActL2StartBlock(t) - actors.ChainB.Sequencer.ActL2EndBlock(t) - require.Equal(t, uint64(1), actors.ChainB.Sequencer.L2Unsafe().Number) - - // Ingest the new unsafe-block events - 
actors.ChainA.Sequencer.SyncSupervisor(t) - actors.ChainB.Sequencer.SyncSupervisor(t) - - // Verify as cross-unsafe with supervisor - actors.Supervisor.ProcessFull(t) - actors.ChainA.Sequencer.ActL2PipelineFull(t) - status = actors.ChainA.Sequencer.SyncStatus() - require.Equal(gt, uint64(1), status.UnsafeL2.Number) - require.Equal(gt, uint64(1), status.CrossUnsafeL2.Number) - actors.ChainB.Sequencer.ActL2PipelineFull(t) - status = actors.ChainB.Sequencer.SyncStatus() - require.Equal(gt, uint64(1), status.UnsafeL2.Number) - require.Equal(gt, uint64(1), status.CrossUnsafeL2.Number) - - // Submit the L2 blocks, sync the local-safe data - actors.ChainA.Batcher.ActSubmitAll(t) - actors.ChainB.Batcher.ActSubmitAll(t) - actors.L1Miner.ActL1StartBlock(12)(t) - actors.L1Miner.ActL1IncludeTx(actors.ChainA.BatcherAddr)(t) - actors.L1Miner.ActL1IncludeTx(actors.ChainB.BatcherAddr)(t) - actors.L1Miner.ActL1EndBlock(t) - actors.Supervisor.SignalLatestL1(t) - // The node will exhaust L1 data, - // it needs the supervisor to see the L1 block first, and provide it to the node. - actors.ChainA.Sequencer.ActL2EventsUntil(t, event.Is[derive.ExhaustedL1Event], 100, false) - actors.ChainB.Sequencer.ActL2EventsUntil(t, event.Is[derive.ExhaustedL1Event], 100, false) - actors.ChainA.Sequencer.SyncSupervisor(t) // supervisor to react to exhaust-L1 - actors.ChainB.Sequencer.SyncSupervisor(t) // supervisor to react to exhaust-L1 - actors.ChainA.Sequencer.ActL2PipelineFull(t) // node to complete syncing to L1 head. - actors.ChainB.Sequencer.ActL2PipelineFull(t) // node to complete syncing to L1 head. 
- - actors.ChainA.Sequencer.ActL1HeadSignal(t) - status = actors.ChainA.Sequencer.SyncStatus() - require.Equal(gt, uint64(1), status.LocalSafeL2.Number) - actors.ChainB.Sequencer.ActL1HeadSignal(t) - status = actors.ChainB.Sequencer.SyncStatus() - require.Equal(gt, uint64(1), status.LocalSafeL2.Number) - - // Ingest the new local-safe event - actors.ChainA.Sequencer.SyncSupervisor(t) - actors.ChainB.Sequencer.SyncSupervisor(t) - - // Cross-safe verify it - actors.Supervisor.ProcessFull(t) - actors.ChainA.Sequencer.ActL2PipelineFull(t) - status = actors.ChainA.Sequencer.SyncStatus() - require.Equal(gt, uint64(1), status.SafeL2.Number) - actors.ChainB.Sequencer.ActL2PipelineFull(t) - status = actors.ChainB.Sequencer.SyncStatus() - require.Equal(gt, uint64(1), status.SafeL2.Number) - - require.Equal(gt, uint64(1), actors.ChainA.Sequencer.L2Safe().Number) - require.Equal(gt, uint64(1), actors.ChainB.Sequencer.L2Safe().Number) - - chainAClient := actors.ChainA.Sequencer.RollupClient() - chainBClient := actors.ChainB.Sequencer.RollupClient() - - ctx := context.Background() - endTimestamp := actors.ChainA.RollupCfg.Genesis.L2Time + actors.ChainA.RollupCfg.BlockTime - startTimestamp := endTimestamp - 1 - source, err := NewSuperRootSource(ctx, chainAClient, chainBClient) - require.NoError(t, err) - start, err := source.CreateSuperRoot(ctx, startTimestamp) - require.NoError(t, err) - end, err := source.CreateSuperRoot(ctx, endTimestamp) - require.NoError(t, err) - - endBlockNumA, err := actors.ChainA.RollupCfg.TargetBlockNumber(endTimestamp) - require.NoError(t, err) - chain1End, err := chainAClient.OutputAtBlock(ctx, endBlockNumA) - require.NoError(t, err) - - endBlockNumB, err := actors.ChainB.RollupCfg.TargetBlockNumber(endTimestamp) - require.NoError(t, err) - chain2End, err := chainBClient.OutputAtBlock(ctx, endBlockNumB) - require.NoError(t, err) - - step1Expected := (&types.TransitionState{ - SuperRoot: start.Marshal(), - PendingProgress: []types.OptimisticBlock{ - 
{BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, - }, - Step: 1, - }).Marshal() - - step2Expected := (&types.TransitionState{ - SuperRoot: start.Marshal(), - PendingProgress: []types.OptimisticBlock{ - {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, - {BlockHash: chain2End.BlockRef.Hash, OutputRoot: chain2End.OutputRoot}, - }, - Step: 2, - }).Marshal() - - paddingStep := func(step uint64) []byte { - return (&types.TransitionState{ - SuperRoot: start.Marshal(), - PendingProgress: []types.OptimisticBlock{ - {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, - {BlockHash: chain2End.BlockRef.Hash, OutputRoot: chain2End.OutputRoot}, - }, - Step: step, - }).Marshal() - } - - tests := []*transitionTest{ - { - name: "ClaimNoChange", - agreedClaim: start.Marshal(), - disputedClaim: start.Marshal(), - disputedTraceIndex: 0, - expectValid: false, - }, - { - name: "ClaimDirectToNextTimestamp", - agreedClaim: start.Marshal(), - disputedClaim: end.Marshal(), - disputedTraceIndex: 0, - expectValid: false, - }, - { - name: "FirstChainOptimisticBlock", - agreedClaim: start.Marshal(), - disputedClaim: step1Expected, - disputedTraceIndex: 0, - expectValid: true, - }, - { - name: "SecondChainOptimisticBlock", - agreedClaim: step1Expected, - disputedClaim: step2Expected, - disputedTraceIndex: 1, - expectValid: true, - }, - { - name: "FirstPaddingStep", - agreedClaim: step2Expected, - disputedClaim: paddingStep(3), - disputedTraceIndex: 2, - expectValid: true, - }, - { - name: "SecondPaddingStep", - agreedClaim: paddingStep(3), - disputedClaim: paddingStep(4), - disputedTraceIndex: 3, - expectValid: true, - }, - { - name: "LastPaddingStep", - agreedClaim: paddingStep(1022), - disputedClaim: paddingStep(1023), - disputedTraceIndex: 1022, - expectValid: true, - }, - { - name: "Consolidate-AllValid", - agreedClaim: paddingStep(1023), - disputedClaim: end.Marshal(), - disputedTraceIndex: 1023, - expectValid: true, - }, - { 
- name: "AlreadyAtClaimedTimestamp", - agreedClaim: end.Marshal(), - disputedClaim: end.Marshal(), - disputedTraceIndex: 5000, - expectValid: true, - }, - - { - name: "FirstChainReachesL1Head", - agreedClaim: start.Marshal(), - disputedClaim: interop.InvalidTransition, - disputedTraceIndex: 0, - // The derivation reaches the L1 head before the next block can be created - l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), - expectValid: true, - }, - { - name: "SecondChainReachesL1Head", - agreedClaim: step1Expected, - disputedClaim: interop.InvalidTransition, - disputedTraceIndex: 1, - // The derivation reaches the L1 head before the next block can be created - l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), - expectValid: true, - skipChallenger: true, // test's agreedClaim is incorrect - first chain is also invalid - }, - { - name: "SuperRootInvalidIfUnsupportedByL1Data", - agreedClaim: start.Marshal(), - disputedClaim: step1Expected, - disputedTraceIndex: 0, - // The derivation reaches the L1 head before the next block can be created - l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), - expectValid: false, - }, - { - name: "FromInvalidTransitionHash", - agreedClaim: interop.InvalidTransition, - disputedClaim: interop.InvalidTransition, - disputedTraceIndex: 2, - // The derivation reaches the L1 head before the next block can be created - l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), - expectValid: true, - }, - } - - for _, test := range tests { - test := test - gt.Run(fmt.Sprintf("%s-fpp", test.name), func(gt *testing.T) { - t := helpers.NewDefaultTesting(gt) - if test.skipProgram { - t.Skip("Not yet implemented") - return - } - logger := testlog.Logger(t, slog.LevelInfo) - checkResult := fpHelpers.ExpectNoError() - if !test.expectValid { - checkResult = fpHelpers.ExpectError(claim.ErrClaimNotValid) - } - l1Head := test.l1Head - if l1Head == (common.Hash{}) { - l1Head = actors.L1Miner.L1Chain().CurrentBlock().Hash() - } - fpHelpers.RunFaultProofProgram( - 
t, - logger, - actors.L1Miner, - checkResult, - WithInteropEnabled(actors, test.agreedClaim, crypto.Keccak256Hash(test.disputedClaim), endTimestamp), - fpHelpers.WithL1Head(l1Head), - ) - }) - - gt.Run(fmt.Sprintf("%s-challenger", test.name), func(gt *testing.T) { - t := helpers.NewDefaultTesting(gt) - if test.skipChallenger { - t.Skip("Not yet implemented") - return - } - logger := testlog.Logger(t, slog.LevelInfo) - prestateProvider := super.NewSuperRootPrestateProvider(&actors.Supervisor.QueryFrontend, startTimestamp) - var l1Head eth.BlockID - if test.l1Head == (common.Hash{}) { - l1Head = eth.ToBlockID(eth.HeaderBlockInfo(actors.L1Miner.L1Chain().CurrentBlock())) - } else { - l1Head = eth.ToBlockID(actors.L1Miner.L1Chain().GetBlockByHash(test.l1Head)) - } - gameDepth := challengerTypes.Depth(30) - rollupCfgs, err := super.NewRollupConfigsFromParsed(actors.ChainA.RollupCfg, actors.ChainB.RollupCfg) - require.NoError(t, err) - provider := super.NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, &actors.Supervisor.QueryFrontend, l1Head, gameDepth, startTimestamp, endTimestamp) - var agreedPrestate []byte - if test.disputedTraceIndex > 0 { - agreedPrestate, err = provider.GetPreimageBytes(ctx, challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex-1))) - require.NoError(t, err) - } else { - superRoot, err := provider.AbsolutePreState(ctx) - require.NoError(t, err) - agreedPrestate = superRoot.Marshal() - } - require.Equal(t, test.agreedClaim, agreedPrestate) - - disputedClaim, err := provider.GetPreimageBytes(ctx, challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex))) - require.NoError(t, err) - if test.expectValid { - require.Equal(t, test.disputedClaim, disputedClaim, "Claim is correct so should match challenger's opinion") - } else { - require.NotEqual(t, test.disputedClaim, disputedClaim, "Claim is incorrect so should not match challenger's opinion") - } - }) - } -} - -func TestInteropFaultProofsInvalidBlock(gt 
*testing.T) { - t := helpers.NewDefaultTesting(gt) - - is := SetupInterop(t) - actors := is.CreateActors() - aliceA := setupUser(t, is, actors.ChainA, 0) - aliceB := setupUser(t, is, actors.ChainB, 0) - initializeChainState(t, actors) - emitTx := initializeEmitterContractTest(t, aliceA, actors) - - // Create a message with a conflicting payload - fakeMessage := []byte("this message was never emitted") - auth := newL2TxOpts(t, aliceB.secret, actors.ChainB) - id := idForTx(t, actors, emitTx) - contract, err := inbox.NewInbox(predeploys.CrossL2InboxAddr, actors.ChainB.SequencerEngine.EthClient()) - require.NoError(t, err) - execTx, err := contract.ValidateMessage(auth, id, crypto.Keccak256Hash(fakeMessage)) - require.NoError(t, err) - includeTxOnChainAndSyncWithoutCrossSafety(t, actors, actors.ChainB, execTx, aliceB.address) - - // Confirm transaction inclusion - rec, err := actors.ChainB.SequencerEngine.EthClient().TransactionReceipt(t.Ctx(), execTx.Hash()) - require.NoError(t, err) - require.NotNil(t, rec) - - // safe head is still behind until we verify cross-safe - assertHeads(t, actors.ChainA, 3, 3, 2, 2) - assertHeads(t, actors.ChainB, 3, 3, 2, 2) - endTimestamp := actors.ChainB.Sequencer.L2Unsafe().Time - - chainAClient := actors.ChainA.Sequencer.RollupClient() - chainBClient := actors.ChainB.Sequencer.RollupClient() - - ctx := context.Background() - startTimestamp := endTimestamp - 1 - source, err := NewSuperRootSource(ctx, chainAClient, chainBClient) - require.NoError(t, err) - start, err := source.CreateSuperRoot(ctx, startTimestamp) - require.NoError(t, err) - end, err := source.CreateSuperRoot(ctx, endTimestamp) - require.NoError(t, err) - - endBlockNumA, err := actors.ChainA.RollupCfg.TargetBlockNumber(endTimestamp) - require.NoError(t, err) - chain1End, err := chainAClient.OutputAtBlock(ctx, endBlockNumA) - require.NoError(t, err) - - endBlockNumB, err := actors.ChainB.RollupCfg.TargetBlockNumber(endTimestamp) - require.NoError(t, err) - chain2End, err 
:= chainBClient.OutputAtBlock(ctx, endBlockNumB) - require.NoError(t, err) - - step1Expected := (&types.TransitionState{ - SuperRoot: start.Marshal(), - PendingProgress: []types.OptimisticBlock{ - {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, - }, - Step: 1, - }).Marshal() - - step2Expected := (&types.TransitionState{ - SuperRoot: start.Marshal(), - PendingProgress: []types.OptimisticBlock{ - {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, - {BlockHash: chain2End.BlockRef.Hash, OutputRoot: chain2End.OutputRoot}, - }, - Step: 2, - }).Marshal() - - paddingStep := func(step uint64) []byte { - return (&types.TransitionState{ - SuperRoot: start.Marshal(), - PendingProgress: []types.OptimisticBlock{ - {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, - {BlockHash: chain2End.BlockRef.Hash, OutputRoot: chain2End.OutputRoot}, - }, - Step: step, - }).Marshal() - } - - // Induce block replacement - verifyCrossSafe(t, actors) - // assert that the invalid message tx was reorged out - _, err = actors.ChainB.SequencerEngine.EthClient().TransactionReceipt(t.Ctx(), execTx.Hash()) - require.ErrorIs(gt, err, ethereum.NotFound) - assertHeads(t, actors.ChainA, 3, 3, 3, 3) - assertHeads(t, actors.ChainB, 3, 3, 3, 3) - - crossSafeSuperRootEnd, err := source.CreateSuperRoot(ctx, endTimestamp) - require.NoError(t, err) - - tests := []*transitionTest{ - { - name: "FirstChainOptimisticBlock", - agreedClaim: start.Marshal(), - disputedClaim: step1Expected, - disputedTraceIndex: 0, - expectValid: true, - skipChallenger: true, - }, - { - name: "SecondChainOptimisticBlock", - agreedClaim: step1Expected, - disputedClaim: step2Expected, - disputedTraceIndex: 1, - expectValid: true, - skipChallenger: true, - }, - { - name: "FirstPaddingStep", - agreedClaim: step2Expected, - disputedClaim: paddingStep(3), - disputedTraceIndex: 2, - expectValid: true, - skipChallenger: true, - }, - { - name: "SecondPaddingStep", - agreedClaim: 
paddingStep(3), - disputedClaim: paddingStep(4), - disputedTraceIndex: 3, - expectValid: true, - skipChallenger: true, - }, - { - name: "LastPaddingStep", - agreedClaim: paddingStep(1022), - disputedClaim: paddingStep(1023), - disputedTraceIndex: 1022, - expectValid: true, - skipChallenger: true, - }, - { - name: "Consolidate-ExpectInvalidPendingBlock", - agreedClaim: paddingStep(1023), - disputedClaim: end.Marshal(), - disputedTraceIndex: 1023, - expectValid: false, - skipProgram: true, - skipChallenger: true, - }, - { - name: "Consolidate-ReplaceInvalidBlock", - agreedClaim: paddingStep(1023), - disputedClaim: crossSafeSuperRootEnd.Marshal(), - disputedTraceIndex: 1023, - expectValid: true, - skipProgram: true, - skipChallenger: true, - }, - { - name: "Consolidate-ReplaceBlockInvalidatedByFirstInvalidatedBlock", - // Will need to generate an invalid block before this can be enabled - // Check that if a block B depends on a log in block A, and block A is found to have an invalid message - // that block B is also replaced with a deposit only block because A no longer contains the log it needs - skipProgram: true, - skipChallenger: true, - }, - { - name: "AlreadyAtClaimedTimestamp", - agreedClaim: crossSafeSuperRootEnd.Marshal(), - disputedClaim: crossSafeSuperRootEnd.Marshal(), - disputedTraceIndex: 5000, - expectValid: true, - }, - - { - name: "FirstChainReachesL1Head", - agreedClaim: start.Marshal(), - disputedClaim: interop.InvalidTransition, - disputedTraceIndex: 0, - // The derivation reaches the L1 head before the next block can be created - l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), - expectValid: true, - skipChallenger: true, // Challenger doesn't yet check if blocks were safe - }, - { - name: "SecondChainReachesL1Head", - agreedClaim: step1Expected, - disputedClaim: interop.InvalidTransition, - disputedTraceIndex: 1, - // The derivation reaches the L1 head before the next block can be created - l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), - 
expectValid: true, - skipChallenger: true, // Challenger doesn't yet check if blocks were safe - }, - { - name: "SuperRootInvalidIfUnsupportedByL1Data", - agreedClaim: step1Expected, - disputedClaim: step2Expected, - disputedTraceIndex: 1, - // The derivation reaches the L1 head before the next block can be created - l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), - expectValid: false, - skipChallenger: true, // Challenger doesn't yet check if blocks were safe - }, - { - name: "FromInvalidTransitionHash", - agreedClaim: interop.InvalidTransition, - disputedClaim: interop.InvalidTransition, - disputedTraceIndex: 2, - // The derivation reaches the L1 head before the next block can be created - l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), - expectValid: true, - skipChallenger: true, // Challenger doesn't yet check if blocks were safe - }, - } - - for _, test := range tests { - test := test - gt.Run(fmt.Sprintf("%s-fpp", test.name), func(gt *testing.T) { - t := helpers.NewDefaultTesting(gt) - if test.skipProgram { - t.Skip("Not yet implemented") - return - } - logger := testlog.Logger(t, slog.LevelInfo) - checkResult := fpHelpers.ExpectNoError() - if !test.expectValid { - checkResult = fpHelpers.ExpectError(claim.ErrClaimNotValid) - } - l1Head := test.l1Head - if l1Head == (common.Hash{}) { - l1Head = actors.L1Miner.L1Chain().CurrentBlock().Hash() - } - fpHelpers.RunFaultProofProgram( - t, - logger, - actors.L1Miner, - checkResult, - WithInteropEnabled(actors, test.agreedClaim, crypto.Keccak256Hash(test.disputedClaim), endTimestamp), - fpHelpers.WithL1Head(l1Head), - ) - }) - - gt.Run(fmt.Sprintf("%s-challenger", test.name), func(gt *testing.T) { - t := helpers.NewDefaultTesting(gt) - if test.skipChallenger { - t.Skip("Not yet implemented") - return - } - logger := testlog.Logger(t, slog.LevelInfo) - prestateProvider := super.NewSuperRootPrestateProvider(&actors.Supervisor.QueryFrontend, startTimestamp) - var l1Head eth.BlockID - if test.l1Head == 
(common.Hash{}) { - l1Head = eth.ToBlockID(eth.HeaderBlockInfo(actors.L1Miner.L1Chain().CurrentBlock())) - } else { - l1Head = eth.ToBlockID(actors.L1Miner.L1Chain().GetBlockByHash(test.l1Head)) - } - gameDepth := challengerTypes.Depth(30) - rollupCfgs, err := super.NewRollupConfigsFromParsed(actors.ChainA.RollupCfg, actors.ChainB.RollupCfg) - require.NoError(t, err) - provider := super.NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, &actors.Supervisor.QueryFrontend, l1Head, gameDepth, startTimestamp, endTimestamp) - var agreedPrestate []byte - if test.disputedTraceIndex > 0 { - agreedPrestate, err = provider.GetPreimageBytes(ctx, challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex-1))) - require.NoError(t, err) - } else { - superRoot, err := provider.AbsolutePreState(ctx) - require.NoError(t, err) - agreedPrestate = superRoot.Marshal() - } - require.Equal(t, test.agreedClaim, agreedPrestate) - - disputedClaim, err := provider.GetPreimageBytes(ctx, challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex))) - require.NoError(t, err) - if test.expectValid { - require.Equal(t, test.disputedClaim, disputedClaim, "Claim is correct so should match challenger's opinion") - } else { - require.NotEqual(t, test.disputedClaim, disputedClaim, "Claim is incorrect so should not match challenger's opinion") - } - }) - } -} - -func includeTxOnChainAndSyncWithoutCrossSafety(t helpers.Testing, actors *InteropActors, chain *Chain, tx *gethTypes.Transaction, sender common.Address) { - // Advance both chains - chain.Sequencer.ActL2StartBlock(t) - if tx != nil { - err := chain.SequencerEngine.EngineApi.IncludeTx(tx, sender) - require.NoError(t, err) - } - chain.Sequencer.ActL2EndBlock(t) - - cross := actors.ChainA - if chain == actors.ChainA { - cross = actors.ChainB - } - cross.Sequencer.ActL2StartBlock(t) - cross.Sequencer.ActL2EndBlock(t) - - // Sync the chain and the supervisor - chain.Sequencer.SyncSupervisor(t) - 
actors.Supervisor.ProcessFull(t) - - // Add to L1 - actors.ChainA.Batcher.ActSubmitAll(t) - actors.ChainB.Batcher.ActSubmitAll(t) - actors.L1Miner.ActL1StartBlock(12)(t) - actors.L1Miner.ActL1IncludeTx(actors.ChainA.BatcherAddr)(t) - actors.L1Miner.ActL1IncludeTx(actors.ChainB.BatcherAddr)(t) - actors.L1Miner.ActL1EndBlock(t) - - // Complete L1 data processing - actors.ChainA.Sequencer.ActL2EventsUntil(t, event.Is[derive.ExhaustedL1Event], 100, false) - actors.ChainB.Sequencer.ActL2EventsUntil(t, event.Is[derive.ExhaustedL1Event], 100, false) - actors.Supervisor.SignalLatestL1(t) - actors.ChainA.Sequencer.SyncSupervisor(t) // supervisor to react to exhaust-L1 - actors.ChainB.Sequencer.SyncSupervisor(t) // supervisor to react to exhaust-L1 - actors.ChainA.Sequencer.ActL2PipelineFull(t) // node to complete syncing to L1 head. - actors.ChainB.Sequencer.ActL2PipelineFull(t) // node to complete syncing to L1 head. - - // Ingest the new local-safe event - actors.ChainA.Sequencer.SyncSupervisor(t) - actors.ChainB.Sequencer.SyncSupervisor(t) -} - -func verifyCrossSafe(t helpers.Testing, actors *InteropActors) { - actors.Supervisor.ProcessFull(t) - actors.ChainA.Sequencer.ActL2PipelineFull(t) - actors.ChainB.Sequencer.ActL2PipelineFull(t) - // another round-trip, for post-processing like cross-safe / cross-unsafe to propagate to the op-node - actors.ChainA.Sequencer.SyncSupervisor(t) - actors.ChainB.Sequencer.SyncSupervisor(t) - actors.Supervisor.ProcessFull(t) - actors.ChainA.Sequencer.ActL2PipelineFull(t) - actors.ChainB.Sequencer.ActL2PipelineFull(t) -} - -func WithInteropEnabled(actors *InteropActors, agreedPrestate []byte, disputedClaim common.Hash, claimTimestamp uint64) fpHelpers.FixtureInputParam { - return func(f *fpHelpers.FixtureInputs) { - f.InteropEnabled = true - f.AgreedPrestate = agreedPrestate - f.L2OutputRoot = crypto.Keccak256Hash(agreedPrestate) - f.L2Claim = disputedClaim - f.L2BlockNumber = claimTimestamp - - for _, chain := range 
[]*Chain{actors.ChainA, actors.ChainB} { - f.L2Sources = append(f.L2Sources, &fpHelpers.FaultProofProgramL2Source{ - Node: chain.Sequencer.L2Verifier, - Engine: chain.SequencerEngine, - ChainConfig: chain.L2Genesis.Config, - }) - } - } -} - -type transitionTest struct { - name string - agreedClaim []byte - disputedClaim []byte - disputedTraceIndex int64 - l1Head common.Hash // Defaults to current L1 head if not set - expectValid bool - skipProgram bool - skipChallenger bool -} diff --git a/op-e2e/actions/interop/proofs_test.go b/op-e2e/actions/interop/proofs_test.go new file mode 100644 index 00000000000..02ebbea43cf --- /dev/null +++ b/op-e2e/actions/interop/proofs_test.go @@ -0,0 +1,549 @@ +package interop + +import ( + "fmt" + "log/slog" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/super" + challengerTypes "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" + fpHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/proofs/helpers" + "github.com/ethereum-optimism/optimism/op-program/client/claim" + "github.com/ethereum-optimism/optimism/op-program/client/interop" + "github.com/ethereum-optimism/optimism/op-program/client/interop/types" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +func TestInteropFaultProofs(gt *testing.T) { + t := helpers.NewDefaultTesting(gt) + dsl := NewInteropDSL(t) + + dsl.AddL2Block(dsl.Actors.ChainA) + dsl.AddL2Block(dsl.Actors.ChainB) + + // Submit batch data for each chain in separate L1 blocks so tests can have one chain safe and one unsafe + dsl.SubmitBatchData(func(opts *SubmitBatchDataOpts) { + opts.SetChains(dsl.Actors.ChainA) + }) + dsl.SubmitBatchData(func(opts *SubmitBatchDataOpts) { + 
opts.SetChains(dsl.Actors.ChainB) + }) + + actors := dsl.Actors + + endTimestamp := actors.ChainA.RollupCfg.Genesis.L2Time + actors.ChainA.RollupCfg.BlockTime + startTimestamp := endTimestamp - 1 + + start := dsl.SuperRoot(startTimestamp) + end := dsl.SuperRoot(endTimestamp) + + chain1End := dsl.OutputRootAtTimestamp(actors.ChainA, endTimestamp) + chain2End := dsl.OutputRootAtTimestamp(actors.ChainB, endTimestamp) + + step1Expected := (&types.TransitionState{ + SuperRoot: start.Marshal(), + PendingProgress: []types.OptimisticBlock{ + {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, + }, + Step: 1, + }).Marshal() + + step2Expected := (&types.TransitionState{ + SuperRoot: start.Marshal(), + PendingProgress: []types.OptimisticBlock{ + {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, + {BlockHash: chain2End.BlockRef.Hash, OutputRoot: chain2End.OutputRoot}, + }, + Step: 2, + }).Marshal() + + paddingStep := func(step uint64) []byte { + return (&types.TransitionState{ + SuperRoot: start.Marshal(), + PendingProgress: []types.OptimisticBlock{ + {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, + {BlockHash: chain2End.BlockRef.Hash, OutputRoot: chain2End.OutputRoot}, + }, + Step: step, + }).Marshal() + } + + tests := []*transitionTest{ + { + name: "ClaimNoChange", + agreedClaim: start.Marshal(), + disputedClaim: start.Marshal(), + disputedTraceIndex: 0, + expectValid: false, + }, + { + name: "ClaimDirectToNextTimestamp", + agreedClaim: start.Marshal(), + disputedClaim: end.Marshal(), + disputedTraceIndex: 0, + expectValid: false, + }, + { + name: "FirstChainOptimisticBlock", + agreedClaim: start.Marshal(), + disputedClaim: step1Expected, + disputedTraceIndex: 0, + expectValid: true, + }, + { + name: "SecondChainOptimisticBlock", + agreedClaim: step1Expected, + disputedClaim: step2Expected, + disputedTraceIndex: 1, + expectValid: true, + }, + { + name: "FirstPaddingStep", + agreedClaim: step2Expected, + 
disputedClaim: paddingStep(3), + disputedTraceIndex: 2, + expectValid: true, + }, + { + name: "SecondPaddingStep", + agreedClaim: paddingStep(3), + disputedClaim: paddingStep(4), + disputedTraceIndex: 3, + expectValid: true, + }, + { + name: "LastPaddingStep", + agreedClaim: paddingStep(1022), + disputedClaim: paddingStep(1023), + disputedTraceIndex: 1022, + expectValid: true, + }, + { + name: "Consolidate-AllValid", + agreedClaim: paddingStep(1023), + disputedClaim: end.Marshal(), + disputedTraceIndex: 1023, + expectValid: true, + }, + { + name: "AlreadyAtClaimedTimestamp", + agreedClaim: end.Marshal(), + disputedClaim: end.Marshal(), + disputedTraceIndex: 5000, + expectValid: true, + }, + + { + name: "FirstChainReachesL1Head", + agreedClaim: start.Marshal(), + disputedClaim: interop.InvalidTransition, + disputedTraceIndex: 0, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: true, + }, + { + name: "SecondChainReachesL1Head", + agreedClaim: step1Expected, + disputedClaim: interop.InvalidTransition, + disputedTraceIndex: 1, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().GetCanonicalHash(1), + expectValid: true, + }, + { + name: "SuperRootInvalidIfUnsupportedByL1Data", + agreedClaim: start.Marshal(), + disputedClaim: step1Expected, + disputedTraceIndex: 0, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: false, + }, + { + name: "FromInvalidTransitionHash", + agreedClaim: interop.InvalidTransition, + disputedClaim: interop.InvalidTransition, + disputedTraceIndex: 2, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: true, + }, + } + + for _, test := range tests { + test := test + gt.Run(fmt.Sprintf("%s-fpp", test.name), 
func(gt *testing.T) { + t := helpers.NewDefaultTesting(gt) + if test.skipProgram { + t.Skip("Not yet implemented") + return + } + logger := testlog.Logger(t, slog.LevelInfo) + checkResult := fpHelpers.ExpectNoError() + if !test.expectValid { + checkResult = fpHelpers.ExpectError(claim.ErrClaimNotValid) + } + l1Head := test.l1Head + if l1Head == (common.Hash{}) { + l1Head = actors.L1Miner.L1Chain().CurrentBlock().Hash() + } + fpHelpers.RunFaultProofProgram( + t, + logger, + actors.L1Miner, + checkResult, + WithInteropEnabled(actors, test.agreedClaim, crypto.Keccak256Hash(test.disputedClaim), endTimestamp), + fpHelpers.WithL1Head(l1Head), + ) + }) + + gt.Run(fmt.Sprintf("%s-challenger", test.name), func(gt *testing.T) { + t := helpers.NewDefaultTesting(gt) + if test.skipChallenger { + t.Skip("Not yet implemented") + return + } + logger := testlog.Logger(t, slog.LevelInfo) + prestateProvider := super.NewSuperRootPrestateProvider(&actors.Supervisor.QueryFrontend, startTimestamp) + var l1Head eth.BlockID + if test.l1Head == (common.Hash{}) { + l1Head = eth.ToBlockID(eth.HeaderBlockInfo(actors.L1Miner.L1Chain().CurrentBlock())) + } else { + l1Head = eth.ToBlockID(actors.L1Miner.L1Chain().GetBlockByHash(test.l1Head)) + } + gameDepth := challengerTypes.Depth(30) + rollupCfgs, err := super.NewRollupConfigsFromParsed(actors.ChainA.RollupCfg, actors.ChainB.RollupCfg) + require.NoError(t, err) + provider := super.NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, &actors.Supervisor.QueryFrontend, l1Head, gameDepth, startTimestamp, endTimestamp) + var agreedPrestate []byte + if test.disputedTraceIndex > 0 { + agreedPrestate, err = provider.GetPreimageBytes(t.Ctx(), challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex-1))) + require.NoError(t, err) + } else { + superRoot, err := provider.AbsolutePreState(t.Ctx()) + require.NoError(t, err) + agreedPrestate = superRoot.Marshal() + } + require.Equal(t, test.agreedClaim, agreedPrestate) + + 
disputedClaim, err := provider.GetPreimageBytes(t.Ctx(), challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex))) + require.NoError(t, err) + if test.expectValid { + require.Equal(t, test.disputedClaim, disputedClaim, "Claim is correct so should match challenger's opinion") + } else { + require.NotEqual(t, test.disputedClaim, disputedClaim, "Claim is incorrect so should not match challenger's opinion") + } + }) + } +} + +func TestInteropFaultProofsInvalidBlock(gt *testing.T) { + t := helpers.NewDefaultTesting(gt) + + dsl := NewInteropDSL(t) + + actors := dsl.Actors + alice := dsl.CreateUser() + emitterContract := NewEmitterContract(t) + dsl.AddL2Block(actors.ChainA, WithL2BlockTransactions( + emitterContract.Deploy(alice), + )) + dsl.AddL2Block(actors.ChainA, WithL2BlockTransactions( + emitterContract.EmitMessage(alice, "test message"), + )) + emitTx := emitterContract.LastEmittedMessage() + + // Bring ChainB to the same height and timestamp + dsl.AddL2Block(actors.ChainB) + dsl.AddL2Block(actors.ChainB) + dsl.SubmitBatchData() + + // Create a message with a conflicting payload + fakeMessage := []byte("this message was never emitted") + inboxContract := NewInboxContract(t) + dsl.AddL2Block(actors.ChainB, func(opts *AddL2BlockOpts) { + opts.TransactionCreators = []TransactionCreator{inboxContract.Execute(alice, emitTx.Identifier(), fakeMessage)} + opts.BlockIsNotCrossSafe = true + }) + dsl.AddL2Block(actors.ChainA) + + // TODO: I wonder if it would be better to have `opts.ExpectInvalid` that specifies the invalid tx + // then the DSL can assert that it becomes local safe and is then reorged out automatically + // We could still grab the superroot and output roots for the invalid block while it is unsafe + // Other tests may still want to have SkipCrossUnsafeUpdate but generally nicer to be more declarative and + // high level to avoid leaking the details of when supervisor will trigger the reorg if possible. 
+ dsl.SubmitBatchData(func(opts *SubmitBatchDataOpts) { + opts.SkipCrossSafeUpdate = true + }) + + execTx := inboxContract.LastTransaction() + execTx.CheckIncluded() + + // safe head is still behind until we verify cross-safe + assertHeads(t, actors.ChainA, 3, 3, 3, 2) // Chain A's block is cross unsafe + assertHeads(t, actors.ChainB, 3, 3, 2, 2) // Chain B's block is not + endTimestamp := actors.ChainB.Sequencer.L2Unsafe().Time + + startTimestamp := endTimestamp - 1 + start := dsl.SuperRoot(startTimestamp) + end := dsl.SuperRoot(endTimestamp) + + chain1End := dsl.OutputRootAtTimestamp(actors.ChainA, endTimestamp) + chain2End := dsl.OutputRootAtTimestamp(actors.ChainB, endTimestamp) + + step1Expected := (&types.TransitionState{ + SuperRoot: start.Marshal(), + PendingProgress: []types.OptimisticBlock{ + {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, + }, + Step: 1, + }).Marshal() + + step2Expected := (&types.TransitionState{ + SuperRoot: start.Marshal(), + PendingProgress: []types.OptimisticBlock{ + {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, + {BlockHash: chain2End.BlockRef.Hash, OutputRoot: chain2End.OutputRoot}, + }, + Step: 2, + }).Marshal() + + paddingStep := func(step uint64) []byte { + return (&types.TransitionState{ + SuperRoot: start.Marshal(), + PendingProgress: []types.OptimisticBlock{ + {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, + {BlockHash: chain2End.BlockRef.Hash, OutputRoot: chain2End.OutputRoot}, + }, + Step: step, + }).Marshal() + } + + // Induce block replacement + dsl.ProcessCrossSafe() + // assert that the invalid message tx was reorged out + execTx.CheckNotIncluded() + assertHeads(t, actors.ChainA, 3, 3, 3, 3) + assertHeads(t, actors.ChainB, 3, 3, 3, 3) + + crossSafeSuperRootEnd := dsl.SuperRoot(endTimestamp) + + tests := []*transitionTest{ + { + name: "FirstChainOptimisticBlock", + agreedClaim: start.Marshal(), + disputedClaim: step1Expected, + 
disputedTraceIndex: 0, + expectValid: true, + skipChallenger: true, + }, + { + name: "SecondChainOptimisticBlock", + agreedClaim: step1Expected, + disputedClaim: step2Expected, + disputedTraceIndex: 1, + expectValid: true, + skipChallenger: true, + }, + { + name: "FirstPaddingStep", + agreedClaim: step2Expected, + disputedClaim: paddingStep(3), + disputedTraceIndex: 2, + expectValid: true, + skipChallenger: true, + }, + { + name: "SecondPaddingStep", + agreedClaim: paddingStep(3), + disputedClaim: paddingStep(4), + disputedTraceIndex: 3, + expectValid: true, + skipChallenger: true, + }, + { + name: "LastPaddingStep", + agreedClaim: paddingStep(1022), + disputedClaim: paddingStep(1023), + disputedTraceIndex: 1022, + expectValid: true, + skipChallenger: true, + }, + { + name: "Consolidate-ExpectInvalidPendingBlock", + agreedClaim: paddingStep(1023), + disputedClaim: end.Marshal(), + disputedTraceIndex: 1023, + expectValid: false, + skipProgram: true, + skipChallenger: true, + }, + { + name: "Consolidate-ReplaceInvalidBlock", + agreedClaim: paddingStep(1023), + disputedClaim: crossSafeSuperRootEnd.Marshal(), + disputedTraceIndex: 1023, + expectValid: true, + skipProgram: true, + skipChallenger: true, + }, + { + name: "Consolidate-ReplaceBlockInvalidatedByFirstInvalidatedBlock", + // Will need to generate an invalid block before this can be enabled + // Check that if a block B depends on a log in block A, and block A is found to have an invalid message + // that block B is also replaced with a deposit only block because A no longer contains the log it needs + skipProgram: true, + skipChallenger: true, + }, + { + name: "AlreadyAtClaimedTimestamp", + agreedClaim: crossSafeSuperRootEnd.Marshal(), + disputedClaim: crossSafeSuperRootEnd.Marshal(), + disputedTraceIndex: 5000, + expectValid: true, + }, + + { + name: "FirstChainReachesL1Head", + agreedClaim: start.Marshal(), + disputedClaim: interop.InvalidTransition, + disputedTraceIndex: 0, + // The derivation reaches the L1 
head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: true, + skipChallenger: true, // Challenger doesn't yet check if blocks were safe + }, + { + name: "SecondChainReachesL1Head", + agreedClaim: step1Expected, + disputedClaim: interop.InvalidTransition, + disputedTraceIndex: 1, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: true, + skipChallenger: true, // Challenger doesn't yet check if blocks were safe + }, + { + name: "SuperRootInvalidIfUnsupportedByL1Data", + agreedClaim: step1Expected, + disputedClaim: step2Expected, + disputedTraceIndex: 1, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: false, + skipChallenger: true, // Challenger doesn't yet check if blocks were safe + }, + { + name: "FromInvalidTransitionHash", + agreedClaim: interop.InvalidTransition, + disputedClaim: interop.InvalidTransition, + disputedTraceIndex: 2, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: true, + skipChallenger: true, // Challenger doesn't yet check if blocks were safe + }, + } + + for _, test := range tests { + test := test + gt.Run(fmt.Sprintf("%s-fpp", test.name), func(gt *testing.T) { + t := helpers.NewDefaultTesting(gt) + if test.skipProgram { + t.Skip("Not yet implemented") + return + } + logger := testlog.Logger(t, slog.LevelInfo) + checkResult := fpHelpers.ExpectNoError() + if !test.expectValid { + checkResult = fpHelpers.ExpectError(claim.ErrClaimNotValid) + } + l1Head := test.l1Head + if l1Head == (common.Hash{}) { + l1Head = actors.L1Miner.L1Chain().CurrentBlock().Hash() + } + fpHelpers.RunFaultProofProgram( + t, + logger, + actors.L1Miner, + checkResult, + WithInteropEnabled(actors, test.agreedClaim, 
crypto.Keccak256Hash(test.disputedClaim), endTimestamp), + fpHelpers.WithL1Head(l1Head), + ) + }) + + gt.Run(fmt.Sprintf("%s-challenger", test.name), func(gt *testing.T) { + t := helpers.NewDefaultTesting(gt) + if test.skipChallenger { + t.Skip("Not yet implemented") + return + } + logger := testlog.Logger(t, slog.LevelInfo) + prestateProvider := super.NewSuperRootPrestateProvider(&actors.Supervisor.QueryFrontend, startTimestamp) + var l1Head eth.BlockID + if test.l1Head == (common.Hash{}) { + l1Head = eth.ToBlockID(eth.HeaderBlockInfo(actors.L1Miner.L1Chain().CurrentBlock())) + } else { + l1Head = eth.ToBlockID(actors.L1Miner.L1Chain().GetBlockByHash(test.l1Head)) + } + gameDepth := challengerTypes.Depth(30) + rollupCfgs, err := super.NewRollupConfigsFromParsed(actors.ChainA.RollupCfg, actors.ChainB.RollupCfg) + require.NoError(t, err) + provider := super.NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, &actors.Supervisor.QueryFrontend, l1Head, gameDepth, startTimestamp, endTimestamp) + var agreedPrestate []byte + if test.disputedTraceIndex > 0 { + agreedPrestate, err = provider.GetPreimageBytes(t.Ctx(), challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex-1))) + require.NoError(t, err) + } else { + superRoot, err := provider.AbsolutePreState(t.Ctx()) + require.NoError(t, err) + agreedPrestate = superRoot.Marshal() + } + require.Equal(t, test.agreedClaim, agreedPrestate) + + disputedClaim, err := provider.GetPreimageBytes(t.Ctx(), challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex))) + require.NoError(t, err) + if test.expectValid { + require.Equal(t, test.disputedClaim, disputedClaim, "Claim is correct so should match challenger's opinion") + } else { + require.NotEqual(t, test.disputedClaim, disputedClaim, "Claim is incorrect so should not match challenger's opinion") + } + }) + } +} + +func WithInteropEnabled(actors *InteropActors, agreedPrestate []byte, disputedClaim common.Hash, claimTimestamp uint64) 
fpHelpers.FixtureInputParam { + return func(f *fpHelpers.FixtureInputs) { + f.InteropEnabled = true + f.AgreedPrestate = agreedPrestate + f.L2OutputRoot = crypto.Keccak256Hash(agreedPrestate) + f.L2Claim = disputedClaim + f.L2BlockNumber = claimTimestamp + + for _, chain := range []*Chain{actors.ChainA, actors.ChainB} { + f.L2Sources = append(f.L2Sources, &fpHelpers.FaultProofProgramL2Source{ + Node: chain.Sequencer.L2Verifier, + Engine: chain.SequencerEngine, + ChainConfig: chain.L2Genesis.Config, + }) + } + } +} + +type transitionTest struct { + name string + agreedClaim []byte + disputedClaim []byte + disputedTraceIndex int64 + l1Head common.Hash // Defaults to current L1 head if not set + expectValid bool + skipProgram bool + skipChallenger bool +} diff --git a/op-e2e/actions/interop/transactions.go b/op-e2e/actions/interop/transactions.go new file mode 100644 index 00000000000..36b66694938 --- /dev/null +++ b/op-e2e/actions/interop/transactions.go @@ -0,0 +1,53 @@ +package interop + +import ( + "math/big" + + "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/interop/contracts/bindings/inbox" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +type GeneratedTransaction struct { + t helpers.Testing + chain *Chain + tx *types.Transaction +} + +func NewGeneratedTransaction(t helpers.Testing, chain *Chain, tx *types.Transaction) *GeneratedTransaction { + return &GeneratedTransaction{ + t: t, + chain: chain, + tx: tx, + } +} + +func (m *GeneratedTransaction) Identifier() inbox.Identifier { + rcpt, err := m.chain.SequencerEngine.EthClient().TransactionReceipt(m.t.Ctx(), m.tx.Hash()) + require.NoError(m.t, err) + block, err := m.chain.SequencerEngine.EthClient().BlockByHash(m.t.Ctx(), rcpt.BlockHash) + require.NoError(m.t, err) + require.NotZero(m.t, len(rcpt.Logs), "Transaction did not include any logs to reference") + + 
return inbox.Identifier{ + Origin: *m.tx.To(), + BlockNumber: rcpt.BlockNumber, + LogIndex: new(big.Int).SetUint64(uint64(rcpt.Logs[0].Index)), + Timestamp: new(big.Int).SetUint64(block.Time()), + ChainId: m.chain.RollupCfg.L2ChainID, + } +} + +func (m *GeneratedTransaction) CheckIncluded() { + rcpt, err := m.chain.SequencerEngine.EthClient().TransactionReceipt(m.t.Ctx(), m.tx.Hash()) + require.NoError(m.t, err) + require.NotNil(m.t, rcpt) +} + +func (m *GeneratedTransaction) CheckNotIncluded() { + rcpt, err := m.chain.SequencerEngine.EthClient().TransactionReceipt(m.t.Ctx(), m.tx.Hash()) + require.ErrorIs(m.t, err, ethereum.NotFound) + require.Nil(m.t, rcpt) +} diff --git a/op-e2e/system/e2esys/setup.go b/op-e2e/system/e2esys/setup.go index 9934abe38c2..a5ffe01f793 100644 --- a/op-e2e/system/e2esys/setup.go +++ b/op-e2e/system/e2esys/setup.go @@ -196,6 +196,7 @@ func RegolithSystemConfig(t *testing.T, regolithTimeOffset *hexutil.Uint64, opts cfg.DeployConfig.L2GenesisFjordTimeOffset = nil cfg.DeployConfig.L2GenesisGraniteTimeOffset = nil cfg.DeployConfig.L2GenesisHoloceneTimeOffset = nil + cfg.DeployConfig.L2GenesisIsthmusTimeOffset = nil // ADD NEW FORKS HERE! return cfg } @@ -238,6 +239,12 @@ func HoloceneSystemConfig(t *testing.T, holoceneTimeOffset *hexutil.Uint64, opts return cfg } +func IsthmusSystemConfig(t *testing.T, isthmusTimeOffset *hexutil.Uint64, opts ...SystemConfigOpt) SystemConfig { + cfg := HoloceneSystemConfig(t, &genesisTime, opts...) 
+ cfg.DeployConfig.L1PragueTimeOffset = isthmusTimeOffset + cfg.DeployConfig.L2GenesisIsthmusTimeOffset = isthmusTimeOffset + return cfg +} func writeDefaultJWT(t testing.TB) string { // Sadly the geth node config cannot load JWT secret from memory, it has to be a file jwtPath := path.Join(t.TempDir(), "jwt_secret") diff --git a/op-node/rollup/attributes/attributes.go b/op-node/rollup/attributes/attributes.go index 297a0680d03..16cfc2e9d29 100644 --- a/op-node/rollup/attributes/attributes.go +++ b/op-node/rollup/attributes/attributes.go @@ -193,9 +193,9 @@ func (eq *AttributesHandler) consolidateNextSafeAttributes(attributes *derive.At return } eq.emitter.Emit(engine.PromotePendingSafeEvent{ - Ref: ref, - Concluding: attributes.Concluding, - DerivedFrom: attributes.DerivedFrom, + Ref: ref, + Concluding: attributes.Concluding, + Source: attributes.DerivedFrom, }) } diff --git a/op-node/rollup/attributes/attributes_test.go b/op-node/rollup/attributes/attributes_test.go index 69880885a8e..65315fbba11 100644 --- a/op-node/rollup/attributes/attributes_test.go +++ b/op-node/rollup/attributes/attributes_test.go @@ -299,9 +299,9 @@ func TestAttributesHandler(t *testing.T) { l2.ExpectPayloadByNumber(refA1.Number, payloadA1, nil) emitter.ExpectOnce(engine.PromotePendingSafeEvent{ - Ref: refA1, - Concluding: concluding, - DerivedFrom: refB, + Ref: refA1, + Concluding: concluding, + Source: refB, }) ah.OnEvent(engine.PendingSafeUpdateEvent{ PendingSafe: refA0, diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index 276746719df..67d0724b1c5 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -353,7 +353,7 @@ func (s *SyncDeriver) OnEvent(ev event.Event) bool { func (s *SyncDeriver) onSafeDerivedBlock(x engine.SafeDerivedEvent) { if s.SafeHeadNotifs != nil && s.SafeHeadNotifs.Enabled() { - if err := s.SafeHeadNotifs.SafeHeadUpdated(x.Safe, x.DerivedFrom.ID()); err != nil { + if err := 
s.SafeHeadNotifs.SafeHeadUpdated(x.Safe, x.Source.ID()); err != nil { // At this point our state is in a potentially inconsistent state as we've updated the safe head // in the execution client but failed to post process it. Reset the pipeline so the safe head rolls back // a little (it always rolls back at least 1 block) and then it will retry storing the entry diff --git a/op-node/rollup/engine/events.go b/op-node/rollup/engine/events.go index 1ecbf439fce..19578b94e66 100644 --- a/op-node/rollup/engine/events.go +++ b/op-node/rollup/engine/events.go @@ -15,10 +15,10 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" ) -// ReplaceBlockDerivedFrom is a magic value for the "DerivedFrom" attribute, +// ReplaceBlockSource is a magic value for the "Source" attribute, // used when a L2 block is a replacement of an invalidated block. // After the replacement has been processed, a reset is performed to derive the next L2 blocks. -var ReplaceBlockDerivedFrom = eth.L1BlockRef{ +var ReplaceBlockSource = eth.L1BlockRef{ Hash: common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), Number: ^uint64(0), ParentHash: common.Hash{}, @@ -110,9 +110,9 @@ func (ev PendingSafeUpdateEvent) String() string { // PromotePendingSafeEvent signals that a block can be marked as pending-safe, and/or safe. type PromotePendingSafeEvent struct { - Ref eth.L2BlockRef - Concluding bool // Concludes the pending phase, so can be promoted to (local) safe - DerivedFrom eth.L1BlockRef + Ref eth.L2BlockRef + Concluding bool // Concludes the pending phase, so can be promoted to (local) safe + Source eth.L1BlockRef } func (ev PromotePendingSafeEvent) String() string { @@ -121,8 +121,8 @@ func (ev PromotePendingSafeEvent) String() string { // PromoteLocalSafeEvent signals that a block can be promoted to local-safe. 
type PromoteLocalSafeEvent struct { - Ref eth.L2BlockRef - DerivedFrom eth.L1BlockRef + Ref eth.L2BlockRef + Source eth.L1BlockRef } func (ev PromoteLocalSafeEvent) String() string { @@ -147,8 +147,8 @@ func (ev CrossSafeUpdateEvent) String() string { // LocalSafeUpdateEvent signals that a block is now considered to be local-safe. type LocalSafeUpdateEvent struct { - Ref eth.L2BlockRef - DerivedFrom eth.L1BlockRef + Ref eth.L2BlockRef + Source eth.L1BlockRef } func (ev LocalSafeUpdateEvent) String() string { @@ -157,8 +157,8 @@ func (ev LocalSafeUpdateEvent) String() string { // PromoteSafeEvent signals that a block can be promoted to cross-safe. type PromoteSafeEvent struct { - Ref eth.L2BlockRef - DerivedFrom eth.L1BlockRef + Ref eth.L2BlockRef + Source eth.L1BlockRef } func (ev PromoteSafeEvent) String() string { @@ -168,8 +168,8 @@ func (ev PromoteSafeEvent) String() string { // SafeDerivedEvent signals that a block was determined to be safe, and derived from the given L1 block. // This is signaled upon successful processing of PromoteSafeEvent. 
type SafeDerivedEvent struct { - Safe eth.L2BlockRef - DerivedFrom eth.L1BlockRef + Safe eth.L2BlockRef + Source eth.L1BlockRef } func (ev SafeDerivedEvent) String() string { @@ -482,8 +482,8 @@ func (d *EngDeriver) OnEvent(ev event.Event) bool { } if x.Concluding && x.Ref.Number > d.ec.LocalSafeL2Head().Number { d.emitter.Emit(PromoteLocalSafeEvent{ - Ref: x.Ref, - DerivedFrom: x.DerivedFrom, + Ref: x.Ref, + Source: x.Source, }) } case PromoteLocalSafeEvent: @@ -499,7 +499,7 @@ func (d *EngDeriver) OnEvent(ev event.Event) bool { d.log.Debug("Updating safe", "safe", x.Ref, "unsafe", d.ec.UnsafeL2Head()) d.ec.SetSafeHead(x.Ref) // Finalizer can pick up this safe cross-block now - d.emitter.Emit(SafeDerivedEvent{Safe: x.Ref, DerivedFrom: x.DerivedFrom}) + d.emitter.Emit(SafeDerivedEvent{Safe: x.Ref, Source: x.Source}) d.emitter.Emit(CrossSafeUpdateEvent{ CrossSafe: d.ec.SafeL2Head(), LocalSafe: d.ec.LocalSafeL2Head(), diff --git a/op-node/rollup/engine/payload_success.go b/op-node/rollup/engine/payload_success.go index dfafe5c7124..433acb8915b 100644 --- a/op-node/rollup/engine/payload_success.go +++ b/op-node/rollup/engine/payload_success.go @@ -24,7 +24,7 @@ func (ev PayloadSuccessEvent) String() string { } func (eq *EngDeriver) onPayloadSuccess(ev PayloadSuccessEvent) { - if ev.DerivedFrom == ReplaceBlockDerivedFrom { + if ev.DerivedFrom == ReplaceBlockSource { eq.log.Warn("Successfully built replacement block, resetting chain to continue now", "replacement", ev.Ref) // Change the engine state to make the replacement block the cross-safe head of the chain, // And continue syncing from there. 
@@ -49,9 +49,9 @@ func (eq *EngDeriver) onPayloadSuccess(ev PayloadSuccessEvent) { // If derived from L1, then it can be considered (pending) safe if ev.DerivedFrom != (eth.L1BlockRef{}) { eq.emitter.Emit(PromotePendingSafeEvent{ - Ref: ev.Ref, - Concluding: ev.Concluding, - DerivedFrom: ev.DerivedFrom, + Ref: ev.Ref, + Concluding: ev.Concluding, + Source: ev.DerivedFrom, }) } diff --git a/op-node/rollup/finality/altda_test.go b/op-node/rollup/finality/altda_test.go index e100cc892d3..7c1241aaf8c 100644 --- a/op-node/rollup/finality/altda_test.go +++ b/op-node/rollup/finality/altda_test.go @@ -132,7 +132,7 @@ func TestAltDAFinalityData(t *testing.T) { L1Origin: previous.ID(), // reference previous origin, not the block the batch was included in SequenceNumber: j, } - fi.OnEvent(engine.SafeDerivedEvent{Safe: l2parent, DerivedFrom: l1parent}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: l2parent, Source: l1parent}) emitter.AssertExpectations(t) } // might trigger finalization attempt, if expired finality delay diff --git a/op-node/rollup/finality/finalizer.go b/op-node/rollup/finality/finalizer.go index fb49bf7afd0..3dcbd1563bb 100644 --- a/op-node/rollup/finality/finalizer.go +++ b/op-node/rollup/finality/finalizer.go @@ -142,7 +142,7 @@ func (fi *Finalizer) OnEvent(ev event.Event) bool { case FinalizeL1Event: fi.onL1Finalized(x.FinalizedL1) case engine.SafeDerivedEvent: - fi.onDerivedSafeBlock(x.Safe, x.DerivedFrom) + fi.onDerivedSafeBlock(x.Safe, x.Source) case derive.DeriverIdleEvent: fi.onDerivationIdle(x.Origin) case rollup.ResetEvent: diff --git a/op-node/rollup/finality/finalizer_test.go b/op-node/rollup/finality/finalizer_test.go index e65b21abf62..91ad3b76720 100644 --- a/op-node/rollup/finality/finalizer_test.go +++ b/op-node/rollup/finality/finalizer_test.go @@ -195,12 +195,12 @@ func TestEngineQueue_Finalize(t *testing.T) { fi.AttachEmitter(emitter) // now say C1 was included in D and became the new safe head - fi.OnEvent(engine.SafeDerivedEvent{Safe: 
refC1, DerivedFrom: refD}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1, Source: refD}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refD}) emitter.AssertExpectations(t) // now say D0 was included in E and became the new safe head - fi.OnEvent(engine.SafeDerivedEvent{Safe: refD0, DerivedFrom: refE}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refD0, Source: refE}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) emitter.AssertExpectations(t) @@ -230,12 +230,12 @@ func TestEngineQueue_Finalize(t *testing.T) { fi.AttachEmitter(emitter) // now say C1 was included in D and became the new safe head - fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1, DerivedFrom: refD}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1, Source: refD}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refD}) emitter.AssertExpectations(t) // now say D0 was included in E and became the new safe head - fi.OnEvent(engine.SafeDerivedEvent{Safe: refD0, DerivedFrom: refE}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refD0, Source: refE}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) emitter.AssertExpectations(t) @@ -269,11 +269,11 @@ func TestEngineQueue_Finalize(t *testing.T) { fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F) fi.AttachEmitter(emitter) - fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1, DerivedFrom: refD}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1, Source: refD}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refD}) emitter.AssertExpectations(t) - fi.OnEvent(engine.SafeDerivedEvent{Safe: refD0, DerivedFrom: refE}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refD0, Source: refE}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) emitter.AssertExpectations(t) @@ -312,11 +312,11 @@ func TestEngineQueue_Finalize(t *testing.T) { fi.OnEvent(derive.DeriverIdleEvent{Origin: refG}) emitter.AssertExpectations(t) - fi.OnEvent(engine.SafeDerivedEvent{Safe: refD1, DerivedFrom: refH}) - fi.OnEvent(engine.SafeDerivedEvent{Safe: refE0, DerivedFrom: refH}) - 
fi.OnEvent(engine.SafeDerivedEvent{Safe: refE1, DerivedFrom: refH}) - fi.OnEvent(engine.SafeDerivedEvent{Safe: refF0, DerivedFrom: refH}) - fi.OnEvent(engine.SafeDerivedEvent{Safe: refF1, DerivedFrom: refH}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refD1, Source: refH}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refE0, Source: refH}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refE1, Source: refH}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refF0, Source: refH}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refF1, Source: refH}) emitter.AssertExpectations(t) // above updates add data, but no attempt is made until idle or L1 signal // We recently finalized already, and there is no new L1 finality data @@ -356,12 +356,12 @@ func TestEngineQueue_Finalize(t *testing.T) { fi.AttachEmitter(emitter) // now say B1 was included in C and became the new safe head - fi.OnEvent(engine.SafeDerivedEvent{Safe: refB1, DerivedFrom: refC}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refB1, Source: refC}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refC}) emitter.AssertExpectations(t) // now say C0 was included in E and became the new safe head - fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0, DerivedFrom: refE}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0, Source: refE}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) emitter.AssertExpectations(t) @@ -393,7 +393,7 @@ func TestEngineQueue_Finalize(t *testing.T) { fi.AttachEmitter(emitter) // now say B1 was included in C and became the new safe head - fi.OnEvent(engine.SafeDerivedEvent{Safe: refB1, DerivedFrom: refC}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refB1, Source: refC}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refC}) emitter.AssertExpectations(t) @@ -420,8 +420,8 @@ func TestEngineQueue_Finalize(t *testing.T) { ParentHash: refC.Hash, Time: refC.Time + l1Time, } - fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0Alt, DerivedFrom: refDAlt}) - fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1Alt, DerivedFrom: refDAlt}) + 
fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0Alt, Source: refDAlt}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1Alt, Source: refDAlt}) // We get an early finality signal for F, of the chain that did not include refC0Alt and refC1Alt, // as L1 block F does not build on DAlt. @@ -457,7 +457,7 @@ func TestEngineQueue_Finalize(t *testing.T) { emitter.AssertExpectations(t) // no new finality // Include C0 in E - fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0, DerivedFrom: refE}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0, Source: refE}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refE}) // Due to the "finalityDelay" we don't repeat finality checks shortly after one another, // and don't expect a finality attempt. @@ -489,8 +489,8 @@ func TestEngineQueue_Finalize(t *testing.T) { fi.AttachEmitter(emitter) // now say C0 and C1 were included in D and became the new safe head - fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0, DerivedFrom: refD}) - fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1, DerivedFrom: refD}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refC0, Source: refD}) + fi.OnEvent(engine.SafeDerivedEvent{Safe: refC1, Source: refD}) fi.OnEvent(derive.DeriverIdleEvent{Origin: refD}) emitter.AssertExpectations(t) diff --git a/op-node/rollup/interop/managed/system.go b/op-node/rollup/interop/managed/system.go index 5b2540b0492..2f79790c7ad 100644 --- a/op-node/rollup/interop/managed/system.go +++ b/op-node/rollup/interop/managed/system.go @@ -117,22 +117,25 @@ func (m *ManagedMode) OnEvent(ev event.Event) bool { ref := x.Ref.BlockRef() m.events.Send(&supervisortypes.ManagedEvent{UnsafeBlock: &ref}) case engine.LocalSafeUpdateEvent: - m.log.Info("Emitting local safe update because of L2 block", "derivedFrom", x.DerivedFrom, "derived", x.Ref) + m.log.Info("Emitting local safe update because of L2 block", "derivedFrom", x.Source, "derived", x.Ref) m.events.Send(&supervisortypes.ManagedEvent{DerivationUpdate: &supervisortypes.DerivedBlockRefPair{ - DerivedFrom: 
x.DerivedFrom, - Derived: x.Ref.BlockRef(), + Source: x.Source, + Derived: x.Ref.BlockRef(), }}) case derive.DeriverL1StatusEvent: m.log.Info("Emitting local safe update because of L1 traversal", "derivedFrom", x.Origin, "derived", x.LastL2) - m.events.Send(&supervisortypes.ManagedEvent{DerivationUpdate: &supervisortypes.DerivedBlockRefPair{ - DerivedFrom: x.Origin, - Derived: x.LastL2.BlockRef(), - }}) + m.events.Send(&supervisortypes.ManagedEvent{ + DerivationUpdate: &supervisortypes.DerivedBlockRefPair{ + Source: x.Origin, + Derived: x.LastL2.BlockRef(), + }, + DerivationOriginUpdate: &x.Origin, + }) case derive.ExhaustedL1Event: m.log.Info("Exhausted L1 data", "derivedFrom", x.L1Ref, "derived", x.LastL2) m.events.Send(&supervisortypes.ManagedEvent{ExhaustL1: &supervisortypes.DerivedBlockRefPair{ - DerivedFrom: x.L1Ref, - Derived: x.LastL2.BlockRef(), + Source: x.L1Ref, + Derived: x.LastL2.BlockRef(), }}) case engine.InteropReplacedBlockEvent: m.log.Info("Replaced block", "replacement", x.Ref) @@ -182,8 +185,8 @@ func (m *ManagedMode) UpdateCrossSafe(ctx context.Context, derived eth.BlockID, return fmt.Errorf("failed to get L1BlockRef: %w", err) } m.emitter.Emit(engine.PromoteSafeEvent{ - Ref: l2Ref, - DerivedFrom: l1Ref, + Ref: l2Ref, + Source: l1Ref, }) // We return early: there is no point waiting for the cross-safe engine-update synchronously. // All error-feedback comes to the supervisor by aborting derivation tasks with an error. @@ -222,7 +225,7 @@ func (m *ManagedMode) InvalidateBlock(ctx context.Context, seal supervisortypes. 
Attributes: attributes, Parent: parentRef, Concluding: true, - DerivedFrom: engine.ReplaceBlockDerivedFrom, + DerivedFrom: engine.ReplaceBlockSource, } m.emitter.Emit(engine.InteropInvalidateBlockEvent{Invalidated: ref, Attributes: annotated}) @@ -241,8 +244,8 @@ func (m *ManagedMode) AnchorPoint(ctx context.Context) (supervisortypes.DerivedB return supervisortypes.DerivedBlockRefPair{}, fmt.Errorf("failed to fetch L2 block ref: %w", err) } return supervisortypes.DerivedBlockRefPair{ - DerivedFrom: l1Ref, - Derived: l2Ref.BlockRef(), + Source: l1Ref, + Derived: l2Ref.BlockRef(), }, nil } diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index ff68658ec06..d28b3830901 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -559,7 +559,9 @@ func (c *Config) ForkchoiceUpdatedVersion(attr *eth.PayloadAttributes) eth.Engin // NewPayloadVersion returns the EngineAPIMethod suitable for the chain hard fork version. func (c *Config) NewPayloadVersion(timestamp uint64) eth.EngineAPIMethod { - if c.IsEcotone(timestamp) { + if c.IsIsthmus(timestamp) { + return eth.NewPayloadV4 + } else if c.IsEcotone(timestamp) { // Cancun return eth.NewPayloadV3 } else { @@ -569,7 +571,9 @@ func (c *Config) NewPayloadVersion(timestamp uint64) eth.EngineAPIMethod { // GetPayloadVersion returns the EngineAPIMethod suitable for the chain hard fork version. 
func (c *Config) GetPayloadVersion(timestamp uint64) eth.EngineAPIMethod { - if c.IsEcotone(timestamp) { + if c.IsIsthmus(timestamp) { + return eth.GetPayloadV4 + } else if c.IsEcotone(timestamp) { // Cancun return eth.GetPayloadV3 } else { diff --git a/op-node/rollup/types_test.go b/op-node/rollup/types_test.go index dc7b5ded167..f714bc94723 100644 --- a/op-node/rollup/types_test.go +++ b/op-node/rollup/types_test.go @@ -693,6 +693,7 @@ func TestNewPayloadVersion(t *testing.T) { tests := []struct { name string ecotoneTime uint64 + isthmusTime uint64 payloadTime uint64 expectedMethod eth.EngineAPIMethod }{ @@ -700,20 +701,30 @@ func TestNewPayloadVersion(t *testing.T) { name: "BeforeEcotone", ecotoneTime: 10, payloadTime: 5, + isthmusTime: 20, expectedMethod: eth.NewPayloadV2, }, { name: "Ecotone", ecotoneTime: 10, payloadTime: 15, + isthmusTime: 20, expectedMethod: eth.NewPayloadV3, }, + { + name: "Isthmus", + ecotoneTime: 10, + payloadTime: 25, + isthmusTime: 20, + expectedMethod: eth.NewPayloadV4, + }, } for _, test := range tests { test := test t.Run(fmt.Sprintf("TestNewPayloadVersion_%s", test.name), func(t *testing.T) { config.EcotoneTime = &test.ecotoneTime + config.IsthmusTime = &test.isthmusTime assert.Equal(t, config.NewPayloadVersion(test.payloadTime), test.expectedMethod) }) } @@ -725,6 +736,7 @@ func TestGetPayloadVersion(t *testing.T) { config.CanyonTime = &canyonTime tests := []struct { name string + isthmusTime uint64 ecotoneTime uint64 payloadTime uint64 expectedMethod eth.EngineAPIMethod @@ -733,20 +745,30 @@ func TestGetPayloadVersion(t *testing.T) { name: "BeforeEcotone", ecotoneTime: 10, payloadTime: 5, + isthmusTime: 20, expectedMethod: eth.GetPayloadV2, }, { name: "Ecotone", ecotoneTime: 10, payloadTime: 15, + isthmusTime: 20, expectedMethod: eth.GetPayloadV3, }, + { + name: "Isthmus", + ecotoneTime: 10, + payloadTime: 25, + isthmusTime: 20, + expectedMethod: eth.GetPayloadV4, + }, } for _, test := range tests { test := test 
t.Run(fmt.Sprintf("TestGetPayloadVersion_%s", test.name), func(t *testing.T) { config.EcotoneTime = &test.ecotoneTime + config.IsthmusTime = &test.isthmusTime assert.Equal(t, config.GetPayloadVersion(test.payloadTime), test.expectedMethod) }) } diff --git a/op-program/client/interop/consolidate.go b/op-program/client/interop/consolidate.go index b66e7b0a254..47e0f4211e7 100644 --- a/op-program/client/interop/consolidate.go +++ b/op-program/client/interop/consolidate.go @@ -132,13 +132,7 @@ func isInvalidMessageError(err error) bool { type ConsolidateCheckDeps interface { cross.UnsafeFrontierCheckDeps cross.CycleCheckDeps - Check( - chain eth.ChainID, - blockNum uint64, - timestamp uint64, - logIdx uint32, - logHash common.Hash, - ) (includedIn supervisortypes.BlockSeal, err error) + Contains(chain eth.ChainID, query supervisortypes.ContainsQuery) (includedIn supervisortypes.BlockSeal, err error) } func checkHazards( @@ -203,15 +197,9 @@ func newConsolidateCheckDeps(transitionState *types.TransitionState, chains []et }, nil } -func (d *consolidateCheckDeps) Check( - chain eth.ChainID, - blockNum uint64, - timestamp uint64, - logIdx uint32, - logHash common.Hash, -) (includedIn supervisortypes.BlockSeal, err error) { +func (d *consolidateCheckDeps) Contains(chain eth.ChainID, query supervisortypes.ContainsQuery) (includedIn supervisortypes.BlockSeal, err error) { // We can assume the oracle has the block the executing message is in - block, err := d.BlockByNumber(d.oracle, blockNum, chain) + block, err := d.BlockByNumber(d.oracle, query.BlockNum, chain) if err != nil { return supervisortypes.BlockSeal{}, err } diff --git a/op-program/client/l2/engine.go b/op-program/client/l2/engine.go index 7a7ed731ee1..86e03ec04ef 100644 --- a/op-program/client/l2/engine.go +++ b/op-program/client/l2/engine.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/predeploys" 
"github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) @@ -60,17 +61,28 @@ func (o *OracleEngine) L2OutputAtBlockHash(blockHash common.Hash) (*eth.OutputV0 func (o *OracleEngine) l2OutputAtHeader(header *types.Header) (*eth.OutputV0, error) { blockHash := header.Hash() - stateDB, err := o.backend.StateAt(header.Root) - if err != nil { - return nil, fmt.Errorf("failed to open L2 state db at block %s: %w", blockHash, err) - } - withdrawalsTrie, err := stateDB.OpenStorageTrie(predeploys.L2ToL1MessagePasserAddr) - if err != nil { - return nil, fmt.Errorf("withdrawals trie unavailable at block %v: %w", blockHash, err) + var storageRoot [32]byte + // if Isthmus is active, we don't need to compute the storage root, we can use the header + // withdrawalRoot which is the storage root for the L2ToL1MessagePasser contract + if o.rollupCfg.IsIsthmus(header.Time) { + if header.WithdrawalsHash == nil { + return nil, fmt.Errorf("unexpected nil withdrawalsHash in isthmus header for block %v", blockHash) + } + storageRoot = *header.WithdrawalsHash + } else { + stateDB, err := o.backend.StateAt(header.Root) + if err != nil { + return nil, fmt.Errorf("failed to open L2 state db at block %s: %w", blockHash, err) + } + withdrawalsTrie, err := stateDB.OpenStorageTrie(predeploys.L2ToL1MessagePasserAddr) + if err != nil { + return nil, fmt.Errorf("withdrawals trie unavailable at block %v: %w", blockHash, err) + } + storageRoot = withdrawalsTrie.Hash() } output := ð.OutputV0{ StateRoot: eth.Bytes32(header.Root), - MessagePasserStorageRoot: eth.Bytes32(withdrawalsTrie.Hash()), + MessagePasserStorageRoot: eth.Bytes32(storageRoot), BlockHash: blockHash, } return output, nil @@ -80,6 +92,8 @@ func (o *OracleEngine) GetPayload(ctx context.Context, payloadInfo eth.PayloadIn var res *eth.ExecutionPayloadEnvelope var err error switch method := 
o.rollupCfg.GetPayloadVersion(payloadInfo.Timestamp); method { + case eth.GetPayloadV4: + res, err = o.api.GetPayloadV4(ctx, payloadInfo.ID) case eth.GetPayloadV3: res, err = o.api.GetPayloadV3(ctx, payloadInfo.ID) case eth.GetPayloadV2: @@ -108,6 +122,8 @@ func (o *OracleEngine) ForkchoiceUpdate(ctx context.Context, state *eth.Forkchoi func (o *OracleEngine) NewPayload(ctx context.Context, payload *eth.ExecutionPayload, parentBeaconBlockRoot *common.Hash) (*eth.PayloadStatusV1, error) { switch method := o.rollupCfg.NewPayloadVersion(uint64(payload.Timestamp)); method { + case eth.NewPayloadV4: + return o.api.NewPayloadV4(ctx, payload, []common.Hash{}, parentBeaconBlockRoot, []hexutil.Bytes{}) case eth.NewPayloadV3: return o.api.NewPayloadV3(ctx, payload, []common.Hash{}, parentBeaconBlockRoot) case eth.NewPayloadV2: diff --git a/op-program/client/l2/engine_test.go b/op-program/client/l2/engine_test.go index 372d765032d..edaf3d19825 100644 --- a/op-program/client/l2/engine_test.go +++ b/op-program/client/l2/engine_test.go @@ -31,7 +31,7 @@ func TestPayloadByHash(t *testing.T) { ctx := context.Background() t.Run("KnownBlock", func(t *testing.T) { - engine, stub := createOracleEngine(t) + engine, stub := createOracleEngine(t, false) block := stub.head payload, err := engine.PayloadByHash(ctx, block.Hash()) require.NoError(t, err) @@ -41,7 +41,7 @@ func TestPayloadByHash(t *testing.T) { }) t.Run("UnknownBlock", func(t *testing.T) { - engine, _ := createOracleEngine(t) + engine, _ := createOracleEngine(t, false) hash := common.HexToHash("0x878899") payload, err := engine.PayloadByHash(ctx, hash) require.ErrorIs(t, err, ErrNotFound) @@ -53,7 +53,7 @@ func TestPayloadByNumber(t *testing.T) { ctx := context.Background() t.Run("KnownBlock", func(t *testing.T) { - engine, stub := createOracleEngine(t) + engine, stub := createOracleEngine(t, false) block := stub.head payload, err := engine.PayloadByNumber(ctx, block.NumberU64()) require.NoError(t, err) @@ -63,14 +63,14 @@ 
func TestPayloadByNumber(t *testing.T) { }) t.Run("NoCanonicalHash", func(t *testing.T) { - engine, _ := createOracleEngine(t) + engine, _ := createOracleEngine(t, false) payload, err := engine.PayloadByNumber(ctx, uint64(700)) require.ErrorIs(t, err, ErrNotFound) require.Nil(t, payload) }) t.Run("UnknownBlock", func(t *testing.T) { - engine, stub := createOracleEngine(t) + engine, stub := createOracleEngine(t, false) hash := common.HexToHash("0x878899") number := uint64(700) stub.canonical[number] = hash @@ -82,7 +82,7 @@ func TestPayloadByNumber(t *testing.T) { func TestL2BlockRefByLabel(t *testing.T) { ctx := context.Background() - engine, stub := createOracleEngine(t) + engine, stub := createOracleEngine(t, false) tests := []struct { name eth.BlockLabel block *types.Block @@ -108,7 +108,7 @@ func TestL2BlockRefByLabel(t *testing.T) { func TestL2BlockRefByHash(t *testing.T) { ctx := context.Background() - engine, stub := createOracleEngine(t) + engine, stub := createOracleEngine(t, false) t.Run("KnownBlock", func(t *testing.T) { expected, err := derive.L2BlockToBlockRef(engine.rollupCfg, stub.safe) @@ -127,7 +127,7 @@ func TestL2BlockRefByHash(t *testing.T) { func TestSystemConfigByL2Hash(t *testing.T) { ctx := context.Background() - engine, stub := createOracleEngine(t) + engine, stub := createOracleEngine(t, false) t.Run("KnownBlock", func(t *testing.T) { payload, err := eth.BlockAsPayload(stub.safe, engine.backend.Config()) @@ -146,11 +146,25 @@ func TestSystemConfigByL2Hash(t *testing.T) { }) } -func createOracleEngine(t *testing.T) (*OracleEngine, *stubEngineBackend) { - head := createL2Block(t, 4) - safe := createL2Block(t, 3) - finalized := createL2Block(t, 2) +func TestL2OutputRootIsthmus(t *testing.T) { + engine, _ := createOracleEngine(t, true) + + t.Run("Header withdrawalsRoot without fetching state", func(t *testing.T) { + // should return without a panic since there's no need to fetch state when Isthmus is activate, + // StateAt() is not implemented 
in the stub + _, _, err := engine.L2OutputRoot(4) + require.NoError(t, err) + }) +} + +func createOracleEngine(t *testing.T, headBlockOnIsthmus bool) (*OracleEngine, *stubEngineBackend) { + head := createL2Block(t, 4, headBlockOnIsthmus) + safe := createL2Block(t, 3, false) + finalized := createL2Block(t, 2, false) rollupCfg := chaincfg.OPSepolia() + if headBlockOnIsthmus { + rollupCfg.IsthmusTime = &head.Header().Time + } backend := &stubEngineBackend{ head: head, safe: safe, @@ -174,7 +188,7 @@ func createOracleEngine(t *testing.T) (*OracleEngine, *stubEngineBackend) { return &engine, backend } -func createL2Block(t *testing.T, number int) *types.Block { +func createL2Block(t *testing.T, number int, setWithdrawalsRoot bool) *types.Block { tx, err := derive.L1InfoDeposit(chaincfg.OPSepolia(), eth.SystemConfig{}, uint64(1), eth.HeaderBlockInfo(&types.Header{ Number: big.NewInt(32), BaseFee: big.NewInt(7), @@ -187,7 +201,15 @@ func createL2Block(t *testing.T, number int) *types.Block { body := &types.Body{ Transactions: []*types.Transaction{types.NewTx(tx)}, } - return types.NewBlock(header, body, nil, trie.NewStackTrie(nil), types.DefaultBlockConfig) + blockConfig := types.DefaultBlockConfig + var withdrawals []*types.Withdrawal + if setWithdrawalsRoot { + withdrawals = make([]*types.Withdrawal, 0) + body.Withdrawals = withdrawals + header.WithdrawalsHash = &types.EmptyWithdrawalsHash + blockConfig = types.IsthmusBlockConfig + } + return types.NewBlock(header, body, nil, trie.NewStackTrie(nil), blockConfig) } type stubEngineBackend struct { @@ -273,8 +295,9 @@ func (s *stubEngineBackend) GetHeader(hash common.Hash, number uint64) *types.He panic("unsupported") } +// currently returns the head block's header (as required by a test) func (s *stubEngineBackend) GetHeaderByNumber(number uint64) *types.Header { - panic("unsupported") + return s.head.Header() } func (s *stubEngineBackend) GetHeaderByHash(hash common.Hash) *types.Header { diff --git 
a/op-program/client/l2/engineapi/block_processor.go b/op-program/client/l2/engineapi/block_processor.go index fb2de328f38..8a5e0e72a7b 100644 --- a/op-program/client/l2/engineapi/block_processor.go +++ b/op-program/client/l2/engineapi/block_processor.go @@ -38,6 +38,7 @@ type BlockProcessor struct { transactions types.Transactions gasPool *core.GasPool dataProvider BlockDataProvider + evm *vm.EVM } func NewBlockProcessorFromPayloadAttributes(provider BlockDataProvider, parent common.Hash, attrs *eth.PayloadAttributes) (*BlockProcessor, error) { @@ -94,6 +95,7 @@ func NewBlockProcessorFromHeader(provider BlockDataProvider, h *types.Header) (* vmenv := vm.NewEVM(context, statedb, provider.Config(), vm.Config{PrecompileOverrides: precompileOverrides}) return vmenv } + vmenv := mkEVM() if h.ParentBeaconRoot != nil { if provider.Config().IsCancun(header.Number, header.Time) { // Blob tx not supported on optimism chains but fields must be set when Cancun is active. @@ -101,17 +103,18 @@ func NewBlockProcessorFromHeader(provider BlockDataProvider, h *types.Header) (* header.BlobGasUsed = &zero header.ExcessBlobGas = &zero } - vmenv := mkEVM() core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv) } if provider.Config().IsPrague(header.Number, header.Time) { - vmenv := mkEVM() core.ProcessParentBlockHash(header.ParentHash, vmenv) } if provider.Config().IsIsthmus(header.Time) { // set the header withdrawals root for Isthmus blocks mpHash := statedb.GetStorageRoot(predeploys.L2ToL1MessagePasserAddr) header.WithdrawalsHash = &mpHash + + // set the header requests root to empty hash for Isthmus blocks + header.RequestsHash = &types.EmptyRequestsHash } return &BlockProcessor{ @@ -119,6 +122,7 @@ func NewBlockProcessorFromHeader(provider BlockDataProvider, h *types.Header) (* state: statedb, gasPool: gasPool, dataProvider: provider, + evm: vmenv, }, nil } @@ -135,12 +139,7 @@ func (b *BlockProcessor) CheckTxWithinGasLimit(tx *types.Transaction) error { func (b 
*BlockProcessor) AddTx(tx *types.Transaction) error { txIndex := len(b.transactions) b.state.SetTxContext(tx.Hash(), txIndex) - - context := core.NewEVMBlockContext(b.header, b.dataProvider, nil, b.dataProvider.Config(), b.state) - vmConfig := *b.dataProvider.GetVMConfig() - // TODO(#14038): reuse evm - evm := vm.NewEVM(context, b.state, b.dataProvider.Config(), vmConfig) - receipt, err := core.ApplyTransaction(evm, b.gasPool, b.state, b.header, tx, &b.header.GasUsed) + receipt, err := core.ApplyTransaction(b.evm, b.gasPool, b.state, b.header, tx, &b.header.GasUsed) if err != nil { return fmt.Errorf("failed to apply transaction to L2 block (tx %d): %w", txIndex, err) } diff --git a/op-program/client/l2/engineapi/l2_engine_api.go b/op-program/client/l2/engineapi/l2_engine_api.go index 053689546f2..a885894f3f2 100644 --- a/op-program/client/l2/engineapi/l2_engine_api.go +++ b/op-program/client/l2/engineapi/l2_engine_api.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core/state" @@ -223,6 +224,10 @@ func (ea *L2EngineAPI) GetPayloadV3(ctx context.Context, payloadId eth.PayloadID return ea.getPayload(ctx, payloadId) } +func (ea *L2EngineAPI) GetPayloadV4(ctx context.Context, payloadId eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) { + return ea.getPayload(ctx, payloadId) +} + func (ea *L2EngineAPI) config() *params.ChainConfig { return ea.backend.Config() } @@ -354,6 +359,36 @@ func (ea *L2EngineAPI) NewPayloadV3(ctx context.Context, params *eth.ExecutionPa return ea.newPayload(ctx, params, versionedHashes, beaconRoot, nil) } +// Ported from: https://github.com/ethereum-optimism/op-geth/blob/94bb3f660f770afd407280055e7f58c0d89a01af/eth/catalyst/api.go#L646 
+func (ea *L2EngineAPI) NewPayloadV4(ctx context.Context, params *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (*eth.PayloadStatusV1, error) { + if params.Withdrawals == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai")) + } + if params.ExcessBlobGas == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun")) + } + if params.BlobGasUsed == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun")) + } + + if versionedHashes == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun")) + } + if beaconRoot == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun")) + } + if executionRequests == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil executionRequests post-prague")) + } + + if !ea.config().IsIsthmus(uint64(params.Timestamp)) { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.UnsupportedFork.With(errors.New("newPayloadV4 called pre-isthmus")) + } + + requests := convertRequests(executionRequests) + return ea.newPayload(ctx, params, versionedHashes, beaconRoot, requests) +} + func (ea *L2EngineAPI) getPayload(_ context.Context, payloadId eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) { ea.log.Trace("L2Engine API request received", "method", "GetPayload", "id", payloadId) if ea.payloadID != payloadId { @@ -567,3 +602,15 @@ func (ea *L2EngineAPI) invalid(err error, latestValid *types.Header) *eth.Payloa errorMsg := err.Error() return ð.PayloadStatusV1{Status: eth.ExecutionInvalid, LatestValidHash: ¤tHash, ValidationError: &errorMsg} } 
+ +// convertRequests converts a hex requests slice to plain [][]byte. +func convertRequests(hex []hexutil.Bytes) [][]byte { + if hex == nil { + return nil + } + req := make([][]byte, len(hex)) + for i := range hex { + req[i] = hex[i] + } + return req +} diff --git a/op-program/client/l2/engineapi/l2_engine_api_test.go b/op-program/client/l2/engineapi/l2_engine_api_test.go index fd4eae3a768..998b4c942db 100644 --- a/op-program/client/l2/engineapi/l2_engine_api_test.go +++ b/op-program/client/l2/engineapi/l2_engine_api_test.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" @@ -20,6 +21,72 @@ import ( "github.com/stretchr/testify/require" ) +func TestNewPayloadV4(t *testing.T) { + cases := []struct { + isthmusTime uint64 + blockTime uint64 + expectedError string + }{ + {6, 5, engine.UnsupportedFork.Error()}, // before isthmus + {6, 8, ""}, // after isthmus + } + logger, _ := testlog.CaptureLogger(t, log.LvlInfo) + + for _, c := range cases { + genesis := createGenesis() + isthmusTime := c.isthmusTime + genesis.Config.IsthmusTime = &isthmusTime + ethCfg := ðconfig.Config{ + NetworkId: genesis.Config.ChainID.Uint64(), + Genesis: genesis, + StateScheme: rawdb.HashScheme, + NoPruning: true, + } + backend := newStubBackendWithConfig(t, ethCfg) + engineAPI := NewL2EngineAPI(logger, backend, nil) + require.NotNil(t, engineAPI) + genesisBlock := backend.GetHeaderByNumber(0) + genesisHash := genesisBlock.Hash() + eip1559Params := eth.Bytes8([]byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}) + gasLimit := eth.Uint64Quantity(4712388) + result, err := engineAPI.ForkchoiceUpdatedV3(context.Background(), ð.ForkchoiceState{ + HeadBlockHash: genesisHash, + SafeBlockHash: genesisHash, + 
FinalizedBlockHash: genesisHash, + }, ð.PayloadAttributes{ + Timestamp: eth.Uint64Quantity(genesisBlock.Time + c.blockTime), + PrevRandao: eth.Bytes32{0x11}, + SuggestedFeeRecipient: common.Address{0x33}, + Withdrawals: &types.Withdrawals{}, + ParentBeaconBlockRoot: &common.Hash{0x22}, + NoTxPool: false, + GasLimit: &gasLimit, + EIP1559Params: &eip1559Params, + }) + require.NoError(t, err) + require.EqualValues(t, engine.VALID, result.PayloadStatus.Status) + require.NotNil(t, result.PayloadID) + + var envelope *eth.ExecutionPayloadEnvelope + if c.blockTime >= c.isthmusTime { + envelope, err = engineAPI.GetPayloadV4(context.Background(), *result.PayloadID) + } else { + envelope, err = engineAPI.GetPayloadV3(context.Background(), *result.PayloadID) + } + require.NoError(t, err) + require.NotNil(t, envelope) + + newPayloadResult, err := engineAPI.NewPayloadV4(context.Background(), envelope.ExecutionPayload, []common.Hash{}, envelope.ParentBeaconBlockRoot, []hexutil.Bytes{}) + if c.expectedError != "" { + require.ErrorContains(t, err, c.expectedError) + continue + } else { + require.NoError(t, err) + } + require.EqualValues(t, engine.VALID, newPayloadResult.Status) + } +} + func TestCreatedBlocksAreCached(t *testing.T) { logger, logs := testlog.CaptureLogger(t, log.LvlInfo) @@ -47,10 +114,10 @@ func TestCreatedBlocksAreCached(t *testing.T) { require.EqualValues(t, engine.VALID, result.PayloadStatus.Status) require.NotNil(t, result.PayloadID) - envelope, err := engineAPI.GetPayloadV3(context.Background(), *result.PayloadID) + envelope, err := engineAPI.GetPayloadV4(context.Background(), *result.PayloadID) require.NoError(t, err) require.NotNil(t, envelope) - newPayloadResult, err := engineAPI.NewPayloadV3(context.Background(), envelope.ExecutionPayload, []common.Hash{}, envelope.ParentBeaconBlockRoot) + newPayloadResult, err := engineAPI.NewPayloadV4(context.Background(), envelope.ExecutionPayload, []common.Hash{}, envelope.ParentBeaconBlockRoot, []hexutil.Bytes{}) 
require.NoError(t, err) require.EqualValues(t, engine.VALID, newPayloadResult.Status) @@ -59,14 +126,7 @@ func TestCreatedBlocksAreCached(t *testing.T) { require.Equal(t, envelope.ExecutionPayload.BlockHash, foundLog.AttrValue("hash")) } -func newStubBackend(t *testing.T) *stubCachingBackend { - genesis := createGenesis() - ethCfg := ðconfig.Config{ - NetworkId: genesis.Config.ChainID.Uint64(), - Genesis: genesis, - StateScheme: rawdb.HashScheme, - NoPruning: true, - } +func newStubBackendWithConfig(t *testing.T, ethCfg *ethconfig.Config) *stubCachingBackend { nodeCfg := &node.Config{ Name: "l2-geth", } @@ -82,6 +142,17 @@ func newStubBackend(t *testing.T) *stubCachingBackend { return &stubCachingBackend{EngineBackend: chain} } +func newStubBackend(t *testing.T) *stubCachingBackend { + genesis := createGenesis() + ethCfg := ðconfig.Config{ + NetworkId: genesis.Config.ChainID.Uint64(), + Genesis: genesis, + StateScheme: rawdb.HashScheme, + NoPruning: true, + } + return newStubBackendWithConfig(t, ethCfg) +} + func createGenesis() *core.Genesis { config := *params.MergedTestChainConfig config.PragueTime = nil @@ -93,6 +164,7 @@ func createGenesis() *core.Genesis { config.FjordTime = &zero config.GraniteTime = &zero config.HoloceneTime = &zero + config.IsthmusTime = &zero l2Genesis := &core.Genesis{ Config: &config, diff --git a/op-program/client/l2/engineapi/test/l2_engine_api_tests.go b/op-program/client/l2/engineapi/test/l2_engine_api_tests.go index 4a78e258e78..77168255e96 100644 --- a/op-program/client/l2/engineapi/test/l2_engine_api_tests.go +++ b/op-program/client/l2/engineapi/test/l2_engine_api_tests.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -442,7 +443,9 @@ func (h *testHelper) getPayload(id *eth.PayloadID) 
*eth.ExecutionPayloadEnvelope func (h *testHelper) callNewPayload(envelope *eth.ExecutionPayloadEnvelope) (*eth.PayloadStatusV1, error) { n := new(big.Int).SetUint64(uint64(envelope.ExecutionPayload.BlockNumber)) - if h.backend.Config().IsCancun(n, uint64(envelope.ExecutionPayload.Timestamp)) { + if h.backend.Config().IsIsthmus(uint64(envelope.ExecutionPayload.Timestamp)) { + return h.engine.NewPayloadV4(h.ctx, envelope.ExecutionPayload, []common.Hash{}, envelope.ParentBeaconBlockRoot, []hexutil.Bytes{}) + } else if h.backend.Config().IsCancun(n, uint64(envelope.ExecutionPayload.Timestamp)) { return h.engine.NewPayloadV3(h.ctx, envelope.ExecutionPayload, []common.Hash{}, envelope.ParentBeaconBlockRoot) } else { return h.engine.NewPayloadV2(h.ctx, envelope.ExecutionPayload) diff --git a/op-service/eth/supervisor_status.go b/op-service/eth/supervisor_status.go new file mode 100644 index 00000000000..bb47e179a2b --- /dev/null +++ b/op-service/eth/supervisor_status.go @@ -0,0 +1,16 @@ +package eth + +type SupervisorSyncStatus struct { + // MinSyncedL1 is the highest L1 block that has been processed by all supervisor nodes. + // This is not the same as the latest L1 block known to the supervisor, + // but rather the L1 block view of the supervisor nodes. + // This L1 block may not be fully derived into L2 data on all nodes yet. + MinSyncedL1 L1BlockRef `json:"minSyncedL1"` + Chains map[ChainID]*SupervisorChainSyncStatus `json:"chains"` +} + +// SupervisorChainStatus is the status of a chain as seen by the supervisor. +type SupervisorChainSyncStatus struct { + // LocalUnsafe is the latest L2 block that has been processed by the supervisor. 
+ LocalUnsafe BlockRef `json:"localUnsafe"` +} diff --git a/op-service/eth/types.go b/op-service/eth/types.go index d4f1f05c376..76d7fdd9f34 100644 --- a/op-service/eth/types.go +++ b/op-service/eth/types.go @@ -638,9 +638,11 @@ const ( NewPayloadV2 EngineAPIMethod = "engine_newPayloadV2" NewPayloadV3 EngineAPIMethod = "engine_newPayloadV3" + NewPayloadV4 EngineAPIMethod = "engine_newPayloadV4" GetPayloadV2 EngineAPIMethod = "engine_getPayloadV2" GetPayloadV3 EngineAPIMethod = "engine_getPayloadV3" + GetPayloadV4 EngineAPIMethod = "engine_getPayloadV4" ) // StorageKey is a marshaling utility for hex-encoded storage keys, which can have leading 0s and are diff --git a/op-service/sources/engine_client.go b/op-service/sources/engine_client.go index 53a40414f4c..c16629b77d2 100644 --- a/op-service/sources/engine_client.go +++ b/op-service/sources/engine_client.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" @@ -103,6 +104,8 @@ func (s *EngineAPIClient) NewPayload(ctx context.Context, payload *eth.Execution var err error switch method := s.evp.NewPayloadVersion(uint64(payload.Timestamp)); method { + case eth.NewPayloadV4: + err = s.RPC.CallContext(ctx, &result, string(method), payload, []common.Hash{}, parentBeaconBlockRoot, []hexutil.Bytes{}) case eth.NewPayloadV3: err = s.RPC.CallContext(ctx, &result, string(method), payload, []common.Hash{}, parentBeaconBlockRoot) case eth.NewPayloadV2: diff --git a/op-service/sources/supervisor_client.go b/op-service/sources/supervisor_client.go index ba4ec79054e..d543bb096ce 100644 --- a/op-service/sources/supervisor_client.go +++ b/op-service/sources/supervisor_client.go @@ -174,6 +174,15 @@ func (cl *SupervisorClient) AllSafeDerivedAt(ctx context.Context, derivedFrom et return result, err } +func (cl *SupervisorClient) 
SyncStatus(ctx context.Context) (eth.SupervisorSyncStatus, error) { + var result eth.SupervisorSyncStatus + err := cl.client.CallContext( + ctx, + &result, + "supervisor_syncStatus") + return result, err +} + func (cl *SupervisorClient) Close() { cl.client.Close() } diff --git a/op-service/txmgr/metrics/noop.go b/op-service/txmgr/metrics/noop.go index 6ad796d0169..47b1a52c54f 100644 --- a/op-service/txmgr/metrics/noop.go +++ b/op-service/txmgr/metrics/noop.go @@ -2,6 +2,7 @@ package metrics import ( "math/big" + "sync/atomic" "github.com/ethereum/go-ethereum/core/types" ) @@ -18,3 +19,16 @@ func (*NoopTxMetrics) RecordBaseFee(*big.Int) {} func (*NoopTxMetrics) RecordBlobBaseFee(*big.Int) {} func (*NoopTxMetrics) RecordTipCap(*big.Int) {} func (*NoopTxMetrics) RPCError() {} + +type FakeTxMetrics struct { + NoopTxMetrics + pendingTxs atomic.Uint64 +} + +func (m *FakeTxMetrics) RecordPendingTx(p int64) { + m.pendingTxs.Store(uint64(p)) +} + +func (m *FakeTxMetrics) PendingTxs() uint64 { + return m.pendingTxs.Load() +} diff --git a/op-service/txmgr/queue_test.go b/op-service/txmgr/queue_test.go index 27dce154bcc..dd9ed5c0287 100644 --- a/op-service/txmgr/queue_test.go +++ b/op-service/txmgr/queue_test.go @@ -264,3 +264,75 @@ func TestQueue_Send(t *testing.T) { }) } } + +// mockBackendWithConfirmationDelay is a mock backend that delays the confirmation of transactions +type mockBackendWithConfirmationDelay struct { + mockBackend + cachedTxs map[common.Hash]*types.Transaction +} + +// newMockBackendWithConfirmationDelay creates a new mock backend with a confirmation delay. It accepts +// a waitGroup which will be decremented when a transaction is sent. 
+func newMockBackendWithConfirmationDelay(g *gasPricer, wg *sync.WaitGroup) *mockBackendWithConfirmationDelay { + b := &mockBackendWithConfirmationDelay{} + b.cachedTxs = make(map[common.Hash]*types.Transaction) + b.minedTxs = make(map[common.Hash]minedTxInfo) + b.g = g + + sendTx := func(ctx context.Context, tx *types.Transaction) error { + _, exists := b.cachedTxs[tx.Hash()] + if !exists { + b.cachedTxs[tx.Hash()] = tx + wg.Done() + } + return nil + } + b.setTxSender(sendTx) + + return b +} + +// MineAll mines all transactions in the cache. +func (b *mockBackendWithConfirmationDelay) MineAll() { + for hash, tx := range b.cachedTxs { + b.mine(&hash, tx.GasFeeCap(), nil) + } +} + +// Simple test that we can call q.Send() up to the maxPending limit without blocking. +func TestQueue_Send_MaxPendingMetrics(t *testing.T) { + maxPending := 5 + + // boilerplate setup + wg := sync.WaitGroup{} + backend := newMockBackendWithConfirmationDelay(newGasPricer(3), &wg) + metrics := metrics.FakeTxMetrics{} + conf := configWithNumConfs(1) + conf.Backend = backend + conf.NetworkTimeout = 1 * time.Second + conf.ChainID = big.NewInt(1) + mgr, err := NewSimpleTxManagerFromConfig("TEST", testlog.Logger(t, log.LevelDebug), &metrics, conf) + require.NoError(t, err) + + // Construct queue with maxPending limit, mocks and fakes + q := NewQueue[int](context.Background(), mgr, uint64(maxPending)) + + // Send maxPending transactions + for nonce := 0; nonce < maxPending; nonce++ { + wg.Add(1) // Allows us to wait for this transaction to be cached by the backend + q.Send(nonce, TxCandidate{}, make(chan TxReceipt[int], 1)) + } + + // Check that all of the transactions are pending + require.EqualValues(t, maxPending, metrics.PendingTxs()) + + // Wait for the backend to cache all of the transactions + wg.Wait() + + // Mine the transactions (should cause the pending transactions to drop to 0) + backend.MineAll() + require.Eventually(t, func() bool { + t.Log("Pending txs", metrics.PendingTxs()) + 
return metrics.PendingTxs() == 0 + }, 5*time.Second, 1*time.Second, "PendingTxs metric should drop to 0 after all transactions are mined") +} diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index 56159eba33f..97fd45954b3 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -282,8 +282,6 @@ func (m *SimpleTxManager) SendAsync(ctx context.Context, candidate TxCandidate, return } - m.metr.RecordPendingTx(m.pending.Add(1)) - var cancel context.CancelFunc if m.cfg.TxSendTimeout == 0 { ctx, cancel = context.WithCancel(ctx) @@ -295,7 +293,6 @@ func (m *SimpleTxManager) SendAsync(ctx context.Context, candidate TxCandidate, if err != nil { m.resetNonce() cancel() - m.metr.RecordPendingTx(m.pending.Add(-1)) ch <- SendResponse{ Receipt: nil, Err: err, @@ -303,8 +300,10 @@ func (m *SimpleTxManager) SendAsync(ctx context.Context, candidate TxCandidate, return } + m.metr.RecordPendingTx(m.pending.Add(1)) + go func() { - defer m.metr.RecordPendingTx(m.pending.Add(-1)) + defer func() { m.metr.RecordPendingTx(m.pending.Add(-1)) }() defer cancel() receipt, err := m.sendTx(ctx, tx) if err != nil { diff --git a/op-supervisor/supervisor/backend/backend.go b/op-supervisor/supervisor/backend/backend.go index c4bdc550e0f..3f85f9539a8 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/l1access" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/rewinder" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/status" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/superevents" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/syncnode" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend" @@ -58,6 +59,9 @@ type 
SupervisorBackend struct { // syncNodesController controls the derivation or reset of the sync nodes syncNodesController *syncnode.SyncNodesController + // statusTracker tracks the sync status of the supervisor + statusTracker *status.StatusTracker + // synchronousProcessors disables background-workers, // requiring manual triggers for the backend to process l2 data. synchronousProcessors bool @@ -137,6 +141,10 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, super.syncNodesController = syncnode.NewSyncNodesController(logger, depSet, eventSys, super) eventSys.Register("sync-controller", super.syncNodesController, event.DefaultRegisterOpts()) + // create status tracker + super.statusTracker = status.NewStatusTracker() + eventSys.Register("status", super.statusTracker, event.DefaultRegisterOpts()) + // Initialize the resources of the supervisor backend. // Stop the supervisor if any of the resources fails to be initialized. if err := super.initResources(ctx, cfg); err != nil { @@ -247,17 +255,17 @@ func (su *SupervisorBackend) openChainDBs(chainID eth.ChainID) error { } su.chainDBs.AddLogDB(chainID, logDB) - localDB, err := db.OpenLocalDerivedFromDB(su.logger, chainID, su.dataDir, cm) + localDB, err := db.OpenLocalDerivationDB(su.logger, chainID, su.dataDir, cm) if err != nil { return fmt.Errorf("failed to open local derived-from DB of chain %s: %w", chainID, err) } - su.chainDBs.AddLocalDerivedFromDB(chainID, localDB) + su.chainDBs.AddLocalDerivationDB(chainID, localDB) - crossDB, err := db.OpenCrossDerivedFromDB(su.logger, chainID, su.dataDir, cm) + crossDB, err := db.OpenCrossDerivationDB(su.logger, chainID, su.dataDir, cm) if err != nil { return fmt.Errorf("failed to open cross derived-from DB of chain %s: %w", chainID, err) } - su.chainDBs.AddCrossDerivedFromDB(chainID, crossDB) + su.chainDBs.AddCrossDerivationDB(chainID, crossDB) su.chainDBs.AddCrossUnsafeTracker(chainID) @@ -394,7 +402,13 @@ func (su *SupervisorBackend) 
CheckMessage(identifier types.Identifier, payloadHa chainID := identifier.ChainID blockNum := identifier.BlockNumber logIdx := identifier.LogIndex - _, err := su.chainDBs.Check(chainID, blockNum, identifier.Timestamp, logIdx, logHash) + _, err := su.chainDBs.Contains(chainID, + types.ContainsQuery{ + BlockNum: blockNum, + Timestamp: identifier.Timestamp, + LogIdx: logIdx, + LogHash: logHash, + }) if errors.Is(err, types.ErrFuture) { su.logger.Debug("Future message", "identifier", identifier, "payloadHash", payloadHash, "err", err) return types.LocalUnsafe, nil @@ -442,8 +456,8 @@ func (su *SupervisorBackend) CrossSafe(ctx context.Context, chainID eth.ChainID) return types.DerivedIDPair{}, err } return types.DerivedIDPair{ - DerivedFrom: p.DerivedFrom.ID(), - Derived: p.Derived.ID(), + Source: p.Source.ID(), + Derived: p.Derived.ID(), }, nil } @@ -453,8 +467,8 @@ func (su *SupervisorBackend) LocalSafe(ctx context.Context, chainID eth.ChainID) return types.DerivedIDPair{}, err } return types.DerivedIDPair{ - DerivedFrom: p.DerivedFrom.ID(), - Derived: p.Derived.ID(), + Source: p.Source.ID(), + Derived: p.Derived.ID(), }, nil } @@ -508,8 +522,8 @@ func (su *SupervisorBackend) FinalizedL1() eth.BlockRef { return su.chainDBs.FinalizedL1() } -func (su *SupervisorBackend) CrossDerivedFrom(ctx context.Context, chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { - v, err := su.chainDBs.CrossDerivedFromBlockRef(chainID, derived) +func (su *SupervisorBackend) CrossDerivedToSource(ctx context.Context, chainID eth.ChainID, derived eth.BlockID) (source eth.BlockRef, err error) { + v, err := su.chainDBs.CrossDerivedToSourceRef(chainID, derived) if err != nil { return eth.BlockRef{}, err } @@ -528,7 +542,7 @@ func (su *SupervisorBackend) SuperRootAtTimestamp(ctx context.Context, timestamp chainInfos := make([]eth.ChainRootInfo, len(chains)) superRootChains := make([]eth.ChainIDAndOutput, len(chains)) - var crossSafeDerivedFrom eth.BlockID + var 
crossSafeSource eth.BlockID for i, chainID := range chains { src, ok := su.syncSources.Get(chainID) @@ -556,12 +570,12 @@ func (su *SupervisorBackend) SuperRootAtTimestamp(ctx context.Context, timestamp if err != nil { return eth.SuperRootResponse{}, err } - derivedFrom, err := su.chainDBs.CrossDerivedFrom(chainID, ref.ID()) + derivedFrom, err := su.chainDBs.CrossDerivedToSource(chainID, ref.ID()) if err != nil { return eth.SuperRootResponse{}, err } - if crossSafeDerivedFrom.Number == 0 || crossSafeDerivedFrom.Number < derivedFrom.Number { - crossSafeDerivedFrom = derivedFrom.ID() + if crossSafeSource.Number == 0 || crossSafeSource.Number < derivedFrom.Number { + crossSafeSource = derivedFrom.ID() } } superRoot := eth.SuperRoot(ð.SuperV1{ @@ -569,13 +583,17 @@ func (su *SupervisorBackend) SuperRootAtTimestamp(ctx context.Context, timestamp Chains: superRootChains, }) return eth.SuperRootResponse{ - CrossSafeDerivedFrom: crossSafeDerivedFrom, + CrossSafeDerivedFrom: crossSafeSource, Timestamp: uint64(timestamp), SuperRoot: superRoot, Chains: chainInfos, }, nil } +func (su *SupervisorBackend) SyncStatus() (eth.SupervisorSyncStatus, error) { + return su.statusTracker.SyncStatus(), nil +} + // PullLatestL1 makes the supervisor aware of the latest L1 block. Exposed for testing purposes. 
func (su *SupervisorBackend) PullLatestL1() error { return su.l1Accessor.PullLatest() diff --git a/op-supervisor/supervisor/backend/cross/safe_frontier.go b/op-supervisor/supervisor/backend/cross/safe_frontier.go index 17a47d0e478..7697b30613d 100644 --- a/op-supervisor/supervisor/backend/cross/safe_frontier.go +++ b/op-supervisor/supervisor/backend/cross/safe_frontier.go @@ -12,7 +12,7 @@ import ( type SafeFrontierCheckDeps interface { CandidateCrossSafe(chain eth.ChainID) (candidate types.DerivedBlockRefPair, err error) - CrossDerivedFrom(chainID eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) + CrossDerivedToSource(chainID eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) DependencySet() depset.DependencySet } @@ -21,7 +21,7 @@ type SafeFrontierCheckDeps interface { // - already cross-safe. // - the first (if not first: local blocks to verify before proceeding) // local-safe block, after the cross-safe block. -func HazardSafeFrontierChecks(d SafeFrontierCheckDeps, inL1DerivedFrom eth.BlockID, hazards map[types.ChainIndex]types.BlockSeal) error { +func HazardSafeFrontierChecks(d SafeFrontierCheckDeps, inL1Source eth.BlockID, hazards map[types.ChainIndex]types.BlockSeal) error { depSet := d.DependencySet() for hazardChainIndex, hazardBlock := range hazards { hazardChainID, err := depSet.ChainIDFromIndex(hazardChainIndex) @@ -31,7 +31,7 @@ func HazardSafeFrontierChecks(d SafeFrontierCheckDeps, inL1DerivedFrom eth.Block } return err } - initDerivedFrom, err := d.CrossDerivedFrom(hazardChainID, hazardBlock.ID()) + initSource, err := d.CrossDerivedToSource(hazardChainID, hazardBlock.ID()) if err != nil { if errors.Is(err, types.ErrFuture) { // If not in cross-safe scope, then check if it's the candidate cross-safe block. 
@@ -43,16 +43,16 @@ func HazardSafeFrontierChecks(d SafeFrontierCheckDeps, inL1DerivedFrom eth.Block return fmt.Errorf("expected block %s (chain %d) does not match candidate local-safe block %s: %w", hazardBlock, hazardChainID, candidate.Derived, types.ErrConflict) } - if candidate.DerivedFrom.Number > inL1DerivedFrom.Number { + if candidate.Source.Number > inL1Source.Number { return fmt.Errorf("local-safe hazard block %s derived from L1 block %s is after scope %s: %w", - hazardBlock.ID(), initDerivedFrom, inL1DerivedFrom, types.ErrOutOfScope) + hazardBlock.ID(), initSource, inL1Source, types.ErrOutOfScope) } } else { return fmt.Errorf("failed to determine cross-derived of hazard block %s (chain %s): %w", hazardBlock, hazardChainID, err) } - } else if initDerivedFrom.Number > inL1DerivedFrom.Number { + } else if initSource.Number > inL1Source.Number { return fmt.Errorf("cross-safe hazard block %s derived from L1 block %s is after scope %s: %w", - hazardBlock.ID(), initDerivedFrom, inL1DerivedFrom, types.ErrOutOfScope) + hazardBlock.ID(), initSource, inL1Source, types.ErrOutOfScope) } } return nil diff --git a/op-supervisor/supervisor/backend/cross/safe_frontier_test.go b/op-supervisor/supervisor/backend/cross/safe_frontier_test.go index fca91c94fcb..a9ccaef4caf 100644 --- a/op-supervisor/supervisor/backend/cross/safe_frontier_test.go +++ b/op-supervisor/supervisor/backend/cross/safe_frontier_test.go @@ -14,11 +14,11 @@ import ( func TestHazardSafeFrontierChecks(t *testing.T) { t.Run("empty hazards", func(t *testing.T) { sfcd := &mockSafeFrontierCheckDeps{} - l1DerivedFrom := eth.BlockID{} + l1Source := eth.BlockID{} hazards := map[types.ChainIndex]types.BlockSeal{} // when there are no hazards, // no work is done, and no error is returned - err := HazardSafeFrontierChecks(sfcd, l1DerivedFrom, hazards) + err := HazardSafeFrontierChecks(sfcd, l1Source, hazards) require.NoError(t, err) }) t.Run("unknown chain", func(t *testing.T) { @@ -29,114 +29,114 @@ func 
TestHazardSafeFrontierChecks(t *testing.T) { }, }, } - l1DerivedFrom := eth.BlockID{} + l1Source := eth.BlockID{} hazards := map[types.ChainIndex]types.BlockSeal{types.ChainIndex(0): {}} // when there is one hazard, and ChainIDFromIndex returns ErrUnknownChain, // an error is returned as a ErrConflict - err := HazardSafeFrontierChecks(sfcd, l1DerivedFrom, hazards) + err := HazardSafeFrontierChecks(sfcd, l1Source, hazards) require.ErrorIs(t, err, types.ErrConflict) }) - t.Run("initDerivedFrom in scope", func(t *testing.T) { + t.Run("initSource in scope", func(t *testing.T) { sfcd := &mockSafeFrontierCheckDeps{} - sfcd.crossDerivedFromFn = func() (types.BlockSeal, error) { + sfcd.crossSourceFn = func() (types.BlockSeal, error) { return types.BlockSeal{Number: 1}, nil } - l1DerivedFrom := eth.BlockID{Number: 2} + l1Source := eth.BlockID{Number: 2} hazards := map[types.ChainIndex]types.BlockSeal{types.ChainIndex(0): {}} - // when there is one hazard, and CrossDerivedFrom returns a BlockSeal within scope + // when there is one hazard, and CrossSource returns a BlockSeal within scope // (ie the hazard's block number is less than or equal to the derivedFrom block number), // no error is returned - err := HazardSafeFrontierChecks(sfcd, l1DerivedFrom, hazards) + err := HazardSafeFrontierChecks(sfcd, l1Source, hazards) require.NoError(t, err) }) - t.Run("initDerivedFrom out of scope", func(t *testing.T) { + t.Run("initSource out of scope", func(t *testing.T) { sfcd := &mockSafeFrontierCheckDeps{} - sfcd.crossDerivedFromFn = func() (types.BlockSeal, error) { + sfcd.crossSourceFn = func() (types.BlockSeal, error) { return types.BlockSeal{Number: 3}, nil } - l1DerivedFrom := eth.BlockID{Number: 2} + l1Source := eth.BlockID{Number: 2} hazards := map[types.ChainIndex]types.BlockSeal{types.ChainIndex(0): {}} - // when there is one hazard, and CrossDerivedFrom returns a BlockSeal out of scope + // when there is one hazard, and CrossSource returns a BlockSeal out of scope // (ie the 
hazard's block number is greater than the derivedFrom block number), // an error is returned as a ErrOutOfScope - err := HazardSafeFrontierChecks(sfcd, l1DerivedFrom, hazards) + err := HazardSafeFrontierChecks(sfcd, l1Source, hazards) require.ErrorIs(t, err, types.ErrOutOfScope) }) t.Run("errFuture: candidate cross safe failure", func(t *testing.T) { sfcd := &mockSafeFrontierCheckDeps{} - sfcd.crossDerivedFromFn = func() (types.BlockSeal, error) { + sfcd.crossSourceFn = func() (types.BlockSeal, error) { return types.BlockSeal{Number: 3}, types.ErrFuture } sfcd.candidateCrossSafeFn = func() (candidate types.DerivedBlockRefPair, err error) { return types.DerivedBlockRefPair{ - DerivedFrom: eth.BlockRef{}, - Derived: eth.BlockRef{Number: 3, Hash: common.BytesToHash([]byte{0x01})}}, + Source: eth.BlockRef{}, + Derived: eth.BlockRef{Number: 3, Hash: common.BytesToHash([]byte{0x01})}}, errors.New("some error") } - l1DerivedFrom := eth.BlockID{} + l1Source := eth.BlockID{} hazards := map[types.ChainIndex]types.BlockSeal{types.ChainIndex(0): {}} - // when there is one hazard, and CrossDerivedFrom returns an ErrFuture, + // when there is one hazard, and CrossSource returns an ErrFuture, // and CandidateCrossSafe returns an error, // the error from CandidateCrossSafe is returned - err := HazardSafeFrontierChecks(sfcd, l1DerivedFrom, hazards) + err := HazardSafeFrontierChecks(sfcd, l1Source, hazards) require.ErrorContains(t, err, "some error") }) t.Run("errFuture: expected block does not match candidate", func(t *testing.T) { sfcd := &mockSafeFrontierCheckDeps{} - sfcd.crossDerivedFromFn = func() (types.BlockSeal, error) { + sfcd.crossSourceFn = func() (types.BlockSeal, error) { return types.BlockSeal{}, types.ErrFuture } sfcd.candidateCrossSafeFn = func() (candidate types.DerivedBlockRefPair, err error) { return types.DerivedBlockRefPair{ - DerivedFrom: eth.BlockRef{}, - Derived: eth.BlockRef{Number: 3, Hash: common.BytesToHash([]byte{0x01})}, + Source: eth.BlockRef{}, + 
Derived: eth.BlockRef{Number: 3, Hash: common.BytesToHash([]byte{0x01})}, }, nil } - l1DerivedFrom := eth.BlockID{} + l1Source := eth.BlockID{} hazards := map[types.ChainIndex]types.BlockSeal{types.ChainIndex(0): {Number: 3, Hash: common.BytesToHash([]byte{0x02})}} - // when there is one hazard, and CrossDerivedFrom returns an ErrFuture, + // when there is one hazard, and CrossSource returns an ErrFuture, // and CandidateCrossSafe returns a candidate that does not match the hazard, // (ie the candidate's block number is the same as the hazard's block number, but the hashes are different), // an error is returned as a ErrConflict - err := HazardSafeFrontierChecks(sfcd, l1DerivedFrom, hazards) + err := HazardSafeFrontierChecks(sfcd, l1Source, hazards) require.ErrorIs(t, err, types.ErrConflict) }) t.Run("errFuture: local-safe hazard out of scope", func(t *testing.T) { sfcd := &mockSafeFrontierCheckDeps{} - sfcd.crossDerivedFromFn = func() (types.BlockSeal, error) { + sfcd.crossSourceFn = func() (types.BlockSeal, error) { return types.BlockSeal{}, types.ErrFuture } sfcd.candidateCrossSafeFn = func() (candidate types.DerivedBlockRefPair, err error) { return types.DerivedBlockRefPair{ - DerivedFrom: eth.BlockRef{Number: 9}, - Derived: eth.BlockRef{}}, + Source: eth.BlockRef{Number: 9}, + Derived: eth.BlockRef{}}, nil } - l1DerivedFrom := eth.BlockID{Number: 8} + l1Source := eth.BlockID{Number: 8} hazards := map[types.ChainIndex]types.BlockSeal{types.ChainIndex(0): {Number: 3, Hash: common.BytesToHash([]byte{0x02})}} - // when there is one hazard, and CrossDerivedFrom returns an ErrFuture, - // and the initDerivedFrom is out of scope, + // when there is one hazard, and CrossSource returns an ErrFuture, + // and the initSource is out of scope, // an error is returned as a ErrOutOfScope - err := HazardSafeFrontierChecks(sfcd, l1DerivedFrom, hazards) + err := HazardSafeFrontierChecks(sfcd, l1Source, hazards) require.ErrorIs(t, err, types.ErrOutOfScope) }) - 
t.Run("CrossDerivedFrom Error", func(t *testing.T) { + t.Run("CrossSource Error", func(t *testing.T) { sfcd := &mockSafeFrontierCheckDeps{} - sfcd.crossDerivedFromFn = func() (types.BlockSeal, error) { + sfcd.crossSourceFn = func() (types.BlockSeal, error) { return types.BlockSeal{}, errors.New("some error") } sfcd.candidateCrossSafeFn = func() (candidate types.DerivedBlockRefPair, err error) { return types.DerivedBlockRefPair{ - DerivedFrom: eth.BlockRef{Number: 9}, - Derived: eth.BlockRef{}, + Source: eth.BlockRef{Number: 9}, + Derived: eth.BlockRef{}, }, nil } - l1DerivedFrom := eth.BlockID{Number: 8} + l1Source := eth.BlockID{Number: 8} hazards := map[types.ChainIndex]types.BlockSeal{types.ChainIndex(0): {Number: 3, Hash: common.BytesToHash([]byte{0x02})}} - // when there is one hazard, and CrossDerivedFrom returns an ErrFuture, - // and the initDerivedFrom is out of scope, + // when there is one hazard, and CrossSource returns an ErrFuture, + // and the initSource is out of scope, // an error is returned as a ErrOutOfScope - err := HazardSafeFrontierChecks(sfcd, l1DerivedFrom, hazards) + err := HazardSafeFrontierChecks(sfcd, l1Source, hazards) require.ErrorContains(t, err, "some error") }) } @@ -144,7 +144,7 @@ func TestHazardSafeFrontierChecks(t *testing.T) { type mockSafeFrontierCheckDeps struct { deps mockDependencySet candidateCrossSafeFn func() (candidate types.DerivedBlockRefPair, err error) - crossDerivedFromFn func() (derivedFrom types.BlockSeal, err error) + crossSourceFn func() (derivedFrom types.BlockSeal, err error) } func (m *mockSafeFrontierCheckDeps) CandidateCrossSafe(chain eth.ChainID) (candidate types.DerivedBlockRefPair, err error) { @@ -154,9 +154,9 @@ func (m *mockSafeFrontierCheckDeps) CandidateCrossSafe(chain eth.ChainID) (candi return types.DerivedBlockRefPair{}, nil } -func (m *mockSafeFrontierCheckDeps) CrossDerivedFrom(chainID eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { - if m.crossDerivedFromFn != 
nil { - return m.crossDerivedFromFn() +func (m *mockSafeFrontierCheckDeps) CrossDerivedToSource(chainID eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { + if m.crossSourceFn != nil { + return m.crossSourceFn() } return types.BlockSeal{}, nil } diff --git a/op-supervisor/supervisor/backend/cross/safe_start.go b/op-supervisor/supervisor/backend/cross/safe_start.go index 37a50d6c9ec..4ee5f2a0207 100644 --- a/op-supervisor/supervisor/backend/cross/safe_start.go +++ b/op-supervisor/supervisor/backend/cross/safe_start.go @@ -4,17 +4,15 @@ import ( "errors" "fmt" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) type SafeStartDeps interface { - Check(chain eth.ChainID, blockNum uint64, timestamp uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) + Contains(chain eth.ChainID, query types.ContainsQuery) (includedIn types.BlockSeal, err error) - CrossDerivedFrom(chainID eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) + CrossDerivedToSource(chainID eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) DependencySet() depset.DependencySet } @@ -22,7 +20,7 @@ type SafeStartDeps interface { // CrossSafeHazards checks if the given messages all exist and pass invariants. // It returns a hazard-set: if any intra-block messaging happened, // these hazard blocks have to be verified. 
-func CrossSafeHazards(d SafeStartDeps, chainID eth.ChainID, inL1DerivedFrom eth.BlockID, +func CrossSafeHazards(d SafeStartDeps, chainID eth.ChainID, inL1Source eth.BlockID, candidate types.BlockSeal, execMsgs []*types.ExecutingMessage) (hazards map[types.ChainIndex]types.BlockSeal, err error) { hazards = make(map[types.ChainIndex]types.BlockSeal) @@ -61,17 +59,23 @@ func CrossSafeHazards(d SafeStartDeps, chainID eth.ChainID, inL1DerivedFrom eth. if msg.Timestamp < candidate.Timestamp { // If timestamp is older: invariant ensures non-cyclic ordering relative to other messages. // Check that the block that they are included in is cross-safe already. - includedIn, err := d.Check(initChainID, msg.BlockNum, msg.Timestamp, msg.LogIdx, msg.Hash) + includedIn, err := d.Contains(initChainID, + types.ContainsQuery{ + Timestamp: msg.Timestamp, + BlockNum: msg.BlockNum, + LogIdx: msg.LogIdx, + LogHash: msg.Hash, + }) if err != nil { return nil, fmt.Errorf("executing msg %s failed check: %w", msg, err) } - initDerivedFrom, err := d.CrossDerivedFrom(initChainID, includedIn.ID()) + initSource, err := d.CrossDerivedToSource(initChainID, includedIn.ID()) if err != nil { return nil, fmt.Errorf("msg %s included in non-cross-safe block %s: %w", msg, includedIn, err) } - if initDerivedFrom.Number > inL1DerivedFrom.Number { + if initSource.Number > inL1Source.Number { return nil, fmt.Errorf("msg %s was included in block %s derived from %s which is not in cross-safe scope %s: %w", - msg, includedIn, initDerivedFrom, inL1DerivedFrom, types.ErrOutOfScope) + msg, includedIn, initSource, inL1Source, types.ErrOutOfScope) } } else if msg.Timestamp == candidate.Timestamp { // If timestamp is equal: we have to inspect ordering of individual @@ -80,7 +84,13 @@ func CrossSafeHazards(d SafeStartDeps, chainID eth.ChainID, inL1DerivedFrom eth. 
// Thus check that it was included in a local-safe block, // and then proceed with transitive block checks, // to ensure the local block we depend on is becoming cross-safe also. - includedIn, err := d.Check(initChainID, msg.BlockNum, msg.Timestamp, msg.LogIdx, msg.Hash) + includedIn, err := d.Contains(initChainID, + types.ContainsQuery{ + Timestamp: msg.Timestamp, + BlockNum: msg.BlockNum, + LogIdx: msg.LogIdx, + LogHash: msg.Hash, + }) if err != nil { return nil, fmt.Errorf("executing msg %s failed check: %w", msg, err) } diff --git a/op-supervisor/supervisor/backend/cross/safe_start_test.go b/op-supervisor/supervisor/backend/cross/safe_start_test.go index 0b6b922a012..c362430f5ab 100644 --- a/op-supervisor/supervisor/backend/cross/safe_start_test.go +++ b/op-supervisor/supervisor/backend/cross/safe_start_test.go @@ -15,12 +15,12 @@ func TestCrossSafeHazards(t *testing.T) { t.Run("empty execMsgs", func(t *testing.T) { ssd := &mockSafeStartDeps{} chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{} execMsgs := []*types.ExecutingMessage{} // when there are no execMsgs, // no work is done, and no error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.NoError(t, err) require.Empty(t, hazards) }) @@ -32,12 +32,12 @@ func TestCrossSafeHazards(t *testing.T) { }, } chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{} execMsgs := []*types.ExecutingMessage{{}} // when there is one execMsg, and CanExecuteAt returns false, // no work is done and an error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorIs(t, err, types.ErrConflict) 
require.Empty(t, hazards) }) @@ -49,12 +49,12 @@ func TestCrossSafeHazards(t *testing.T) { }, } chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{} execMsgs := []*types.ExecutingMessage{{}} // when there is one execMsg, and CanExecuteAt returns false, // no work is done and an error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorContains(t, err, "some error") require.Empty(t, hazards) }) @@ -66,12 +66,12 @@ func TestCrossSafeHazards(t *testing.T) { }, } chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{} execMsgs := []*types.ExecutingMessage{{}} // when there is one execMsg, and ChainIDFromIndex returns ErrUnknownChain, // an error is returned as a ErrConflict - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorIs(t, err, types.ErrConflict) require.Empty(t, hazards) }) @@ -83,12 +83,12 @@ func TestCrossSafeHazards(t *testing.T) { }, } chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{} execMsgs := []*types.ExecutingMessage{{}} // when there is one execMsg, and ChainIDFromIndex returns some other error, // the error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorContains(t, err, "some error") require.Empty(t, hazards) }) @@ -100,12 +100,12 @@ func TestCrossSafeHazards(t *testing.T) { }, } chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := 
types.BlockSeal{} execMsgs := []*types.ExecutingMessage{{}} // when there is one execMsg, and CanInitiateAt returns false, // the error is returned as a ErrConflict - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorIs(t, err, types.ErrConflict) require.Empty(t, hazards) }) @@ -117,12 +117,12 @@ func TestCrossSafeHazards(t *testing.T) { }, } chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{} execMsgs := []*types.ExecutingMessage{{}} // when there is one execMsg, and CanInitiateAt returns an error, // the error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorContains(t, err, "some error") require.Empty(t, hazards) }) @@ -130,13 +130,13 @@ func TestCrossSafeHazards(t *testing.T) { ssd := &mockSafeStartDeps{} ssd.deps = mockDependencySet{} chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{Timestamp: 2} em1 := &types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 10} execMsgs := []*types.ExecutingMessage{em1} // when there is one execMsg, and the timestamp is greater than the candidate, // an error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorContains(t, err, "breaks timestamp invariant") require.Empty(t, hazards) }) @@ -147,14 +147,14 @@ func TestCrossSafeHazards(t *testing.T) { } ssd.deps = mockDependencySet{} chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{Timestamp: 2} em1 := 
&types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 2} execMsgs := []*types.ExecutingMessage{em1} // when there is one execMsg, and the timetamp is equal to the candidate, // and check returns an error, // that error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorContains(t, err, "some error") require.Empty(t, hazards) }) @@ -166,7 +166,7 @@ func TestCrossSafeHazards(t *testing.T) { } ssd.deps = mockDependencySet{} chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{Timestamp: 2} em1 := &types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 2} em2 := &types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 2} @@ -174,7 +174,7 @@ func TestCrossSafeHazards(t *testing.T) { // when there are two execMsgs, and both are equal time to the candidate, // and check returns the same includedIn for both // they load the hazards once, and return no error - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.NoError(t, err) require.Equal(t, hazards, map[types.ChainIndex]types.BlockSeal{types.ChainIndex(0): sampleBlockSeal}) }) @@ -193,7 +193,7 @@ func TestCrossSafeHazards(t *testing.T) { } ssd.deps = mockDependencySet{} chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{Timestamp: 2} em1 := &types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 2} em2 := &types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 2} @@ -201,7 +201,7 @@ func TestCrossSafeHazards(t *testing.T) { // when there are two execMsgs, and both are equal time to the candidate, // and check returns different includedIn for the two, // an error is 
returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorContains(t, err, "but already depend on") require.Empty(t, hazards) }) @@ -212,126 +212,126 @@ func TestCrossSafeHazards(t *testing.T) { } ssd.deps = mockDependencySet{} chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{Timestamp: 2} em1 := &types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 1} execMsgs := []*types.ExecutingMessage{em1} // when there is one execMsg, and the timestamp is less than the candidate, // and check returns an error, // that error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorContains(t, err, "some error") require.Empty(t, hazards) }) - t.Run("timestamp is less, CrossDerivedFrom returns error", func(t *testing.T) { + t.Run("timestamp is less, DerivedToSource returns error", func(t *testing.T) { ssd := &mockSafeStartDeps{} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} ssd.checkFn = func() (includedIn types.BlockSeal, err error) { return sampleBlockSeal, nil } - ssd.derivedFromFn = func() (derivedFrom types.BlockSeal, err error) { + ssd.derivedToSrcFn = func() (derivedFrom types.BlockSeal, err error) { return types.BlockSeal{}, errors.New("some error") } ssd.deps = mockDependencySet{} chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{Timestamp: 2} em1 := &types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 1} execMsgs := []*types.ExecutingMessage{em1} // when there is one execMsg, and the timestamp is less than the candidate, - // and CrossDerivedFrom returns aan error, + // and 
DerivedToSource returns aan error, // that error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorContains(t, err, "some error") require.Empty(t, hazards) }) - t.Run("timestamp is less, CrossDerivedFrom Number is greater", func(t *testing.T) { + t.Run("timestamp is less, DerivedToSource Number is greater", func(t *testing.T) { ssd := &mockSafeStartDeps{} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} ssd.checkFn = func() (includedIn types.BlockSeal, err error) { return sampleBlockSeal, nil } - sampleDerivedFrom := types.BlockSeal{Number: 4, Hash: common.BytesToHash([]byte{0x03})} - ssd.derivedFromFn = func() (derivedFrom types.BlockSeal, err error) { - return sampleDerivedFrom, nil + sampleSource := types.BlockSeal{Number: 4, Hash: common.BytesToHash([]byte{0x03})} + ssd.derivedToSrcFn = func() (derivedFrom types.BlockSeal, err error) { + return sampleSource, nil } ssd.deps = mockDependencySet{} chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{} + inL1Source := eth.BlockID{} candidate := types.BlockSeal{Timestamp: 2} em1 := &types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 1} execMsgs := []*types.ExecutingMessage{em1} // when there is one execMsg, and the timestamp is less than the candidate, - // and CrossDerivedFrom returns a BlockSeal with a greater Number than the inL1DerivedFrom, + // and DerivedToSource returns a BlockSeal with a greater Number than the inL1Source, // an error is returned as a ErrOutOfScope - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.ErrorIs(t, err, types.ErrOutOfScope) require.Empty(t, hazards) }) - t.Run("timestamp is less, CrossDerivedFrom Number less", func(t *testing.T) { + t.Run("timestamp is 
less, DerivedToSource Number less", func(t *testing.T) { ssd := &mockSafeStartDeps{} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} ssd.checkFn = func() (includedIn types.BlockSeal, err error) { return sampleBlockSeal, nil } - sampleDerivedFrom := types.BlockSeal{Number: 1, Hash: common.BytesToHash([]byte{0x03})} - ssd.derivedFromFn = func() (derivedFrom types.BlockSeal, err error) { - return sampleDerivedFrom, nil + sampleSource := types.BlockSeal{Number: 1, Hash: common.BytesToHash([]byte{0x03})} + ssd.derivedToSrcFn = func() (derivedFrom types.BlockSeal, err error) { + return sampleSource, nil } ssd.deps = mockDependencySet{} chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{Number: 10} + inL1Source := eth.BlockID{Number: 10} candidate := types.BlockSeal{Timestamp: 2} em1 := &types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 1} execMsgs := []*types.ExecutingMessage{em1} // when there is one execMsg, and the timestamp is less than the candidate, - // and CrossDerivedFrom returns a BlockSeal with a smaller Number than the inL1DerivedFrom, + // and DerivedToSource returns a BlockSeal with a smaller Number than the inL1Source, // no error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.NoError(t, err) require.Empty(t, hazards) }) - t.Run("timestamp is less, CrossDerivedFrom Number equal", func(t *testing.T) { + t.Run("timestamp is less, DerivedToSource Number equal", func(t *testing.T) { ssd := &mockSafeStartDeps{} sampleBlockSeal := types.BlockSeal{Number: 3, Hash: common.BytesToHash([]byte{0x02})} ssd.checkFn = func() (includedIn types.BlockSeal, err error) { return sampleBlockSeal, nil } - sampleDerivedFrom := types.BlockSeal{Number: 1, Hash: common.BytesToHash([]byte{0x03})} - ssd.derivedFromFn = func() (derivedFrom types.BlockSeal, err error) { - return 
sampleDerivedFrom, nil + sampleSource := types.BlockSeal{Number: 1, Hash: common.BytesToHash([]byte{0x03})} + ssd.derivedToSrcFn = func() (derivedFrom types.BlockSeal, err error) { + return sampleSource, nil } ssd.deps = mockDependencySet{} chainID := eth.ChainIDFromUInt64(0) - inL1DerivedFrom := eth.BlockID{Number: 1} + inL1Source := eth.BlockID{Number: 1} candidate := types.BlockSeal{Timestamp: 2} em1 := &types.ExecutingMessage{Chain: types.ChainIndex(0), Timestamp: 1} execMsgs := []*types.ExecutingMessage{em1} // when there is one execMsg, and the timestamp is less than the candidate, - // and CrossDerivedFrom returns a BlockSeal with a equal to the Number of inL1DerivedFrom, + // and DerivedToSource returns a BlockSeal with a equal to the Number of inL1Source, // no error is returned - hazards, err := CrossSafeHazards(ssd, chainID, inL1DerivedFrom, candidate, execMsgs) + hazards, err := CrossSafeHazards(ssd, chainID, inL1Source, candidate, execMsgs) require.NoError(t, err) require.Empty(t, hazards) }) } type mockSafeStartDeps struct { - deps mockDependencySet - checkFn func() (includedIn types.BlockSeal, err error) - derivedFromFn func() (derivedFrom types.BlockSeal, err error) + deps mockDependencySet + checkFn func() (includedIn types.BlockSeal, err error) + derivedToSrcFn func() (derivedFrom types.BlockSeal, err error) } -func (m *mockSafeStartDeps) Check(chain eth.ChainID, blockNum uint64, timestamp uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) { +func (m *mockSafeStartDeps) Contains(chain eth.ChainID, q types.ContainsQuery) (includedIn types.BlockSeal, err error) { if m.checkFn != nil { return m.checkFn() } return types.BlockSeal{}, nil } -func (m *mockSafeStartDeps) CrossDerivedFrom(chainID eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { - if m.derivedFromFn != nil { - return m.derivedFromFn() +func (m *mockSafeStartDeps) CrossDerivedToSource(chainID eth.ChainID, derived eth.BlockID) 
(derivedFrom types.BlockSeal, err error) { + if m.derivedToSrcFn != nil { + return m.derivedToSrcFn() } return types.BlockSeal{}, nil } diff --git a/op-supervisor/supervisor/backend/cross/safe_update.go b/op-supervisor/supervisor/backend/cross/safe_update.go index 7f93128f828..ddf97ad8f44 100644 --- a/op-supervisor/supervisor/backend/cross/safe_update.go +++ b/op-supervisor/supervisor/backend/cross/safe_update.go @@ -19,7 +19,7 @@ type CrossSafeDeps interface { SafeStartDeps CandidateCrossSafe(chain eth.ChainID) (candidate types.DerivedBlockRefPair, err error) - NextDerivedFrom(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) + NextSource(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) PreviousDerived(chain eth.ChainID, derived eth.BlockID) (prevDerived types.BlockSeal, err error) OpenBlock(chainID eth.ChainID, blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) @@ -49,21 +49,21 @@ func CrossSafeUpdate(logger log.Logger, chainID eth.ChainID, d CrossSafeDeps) er } if errors.Is(err, types.ErrConflict) { logger.Warn("Found a conflicting local-safe block that cannot be promoted to cross-safe", - "scope", candidate.DerivedFrom, "invalidated", candidate, "err", err) + "scope", candidate.Source, "invalidated", candidate, "err", err) return d.InvalidateLocalSafe(chainID, candidate) } if !errors.Is(err, types.ErrOutOfScope) { return fmt.Errorf("failed to determine cross-safe update scope of chain %s: %w", chainID, err) } // candidate scope is expected to be set if ErrOutOfScope is returned. 
- if candidate.DerivedFrom == (eth.BlockRef{}) { + if candidate.Source == (eth.BlockRef{}) { return fmt.Errorf("expected L1 scope to be defined with ErrOutOfScope: %w", err) } - logger.Debug("Cross-safe updating ran out of L1 scope", "scope", candidate.DerivedFrom, "err", err) + logger.Debug("Cross-safe updating ran out of L1 scope", "scope", candidate.Source, "err", err) // bump the L1 scope up, and repeat the prev L2 block, not the candidate - newScope, err := d.NextDerivedFrom(chainID, candidate.DerivedFrom.ID()) + newScope, err := d.NextSource(chainID, candidate.Source.ID()) if err != nil { - return fmt.Errorf("failed to identify new L1 scope to expand to after %s: %w", candidate.DerivedFrom, err) + return fmt.Errorf("failed to identify new L1 scope to expand to after %s: %w", candidate.Source, err) } currentCrossSafe, err := d.CrossSafe(chainID) if err != nil { @@ -77,7 +77,7 @@ func CrossSafeUpdate(logger log.Logger, chainID eth.ChainID, d CrossSafeDeps) er crossSafeRef := currentCrossSafe.Derived.MustWithParent(parent.ID()) logger.Debug("Bumping cross-safe scope", "scope", newScope, "crossSafe", crossSafeRef) if err := d.UpdateCrossSafe(chainID, newScope, crossSafeRef); err != nil { - return fmt.Errorf("failed to update cross-safe head with L1 scope increment to %s and repeat of L2 block %s: %w", candidate.DerivedFrom, crossSafeRef, err) + return fmt.Errorf("failed to update cross-safe head with L1 scope increment to %s and repeat of L2 block %s: %w", candidate.Source, crossSafeRef, err) } return nil } @@ -91,7 +91,7 @@ func scopedCrossSafeUpdate(logger log.Logger, chainID eth.ChainID, d CrossSafeDe if err != nil { return candidate, fmt.Errorf("failed to determine candidate block for cross-safe: %w", err) } - logger.Debug("Candidate cross-safe", "scope", candidate.DerivedFrom, "candidate", candidate.Derived) + logger.Debug("Candidate cross-safe", "scope", candidate.Source, "candidate", candidate.Derived) opened, _, execMsgs, err := d.OpenBlock(chainID, 
candidate.Derived.Number) if err != nil { return candidate, fmt.Errorf("failed to open block %s: %w", candidate.Derived, err) @@ -99,11 +99,11 @@ func scopedCrossSafeUpdate(logger log.Logger, chainID eth.ChainID, d CrossSafeDe if opened.ID() != candidate.Derived.ID() { return candidate, fmt.Errorf("unsafe L2 DB has %s, but candidate cross-safe was %s: %w", opened, candidate.Derived, types.ErrConflict) } - hazards, err := CrossSafeHazards(d, chainID, candidate.DerivedFrom.ID(), types.BlockSealFromRef(opened), sliceOfExecMsgs(execMsgs)) + hazards, err := CrossSafeHazards(d, chainID, candidate.Source.ID(), types.BlockSealFromRef(opened), sliceOfExecMsgs(execMsgs)) if err != nil { return candidate, fmt.Errorf("failed to determine dependencies of cross-safe candidate %s: %w", candidate.Derived, err) } - if err := HazardSafeFrontierChecks(d, candidate.DerivedFrom.ID(), hazards); err != nil { + if err := HazardSafeFrontierChecks(d, candidate.Source.ID(), hazards); err != nil { return candidate, fmt.Errorf("failed to verify block %s in cross-safe frontier: %w", candidate.Derived, err) } if err := HazardCycleChecks(d.DependencySet(), d, candidate.Derived.Time, hazards); err != nil { @@ -111,8 +111,8 @@ func scopedCrossSafeUpdate(logger log.Logger, chainID eth.ChainID, d CrossSafeDe } // promote the candidate block to cross-safe - if err := d.UpdateCrossSafe(chainID, candidate.DerivedFrom, candidate.Derived); err != nil { - return candidate, fmt.Errorf("failed to update cross-safe head to %s, derived from scope %s: %w", candidate.Derived, candidate.DerivedFrom, err) + if err := d.UpdateCrossSafe(chainID, candidate.Source, candidate.Derived); err != nil { + return candidate, fmt.Errorf("failed to update cross-safe head to %s, derived from scope %s: %w", candidate.Derived, candidate.Source, err) } return candidate, nil } diff --git a/op-supervisor/supervisor/backend/cross/safe_update_test.go b/op-supervisor/supervisor/backend/cross/safe_update_test.go index 
11536123397..09b5ac444da 100644 --- a/op-supervisor/supervisor/backend/cross/safe_update_test.go +++ b/op-supervisor/supervisor/backend/cross/safe_update_test.go @@ -22,8 +22,8 @@ func TestCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (pair types.DerivedBlockRefPair, err error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } opened := eth.BlockRef{Number: 1} @@ -48,8 +48,8 @@ func TestCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (pair types.DerivedBlockRefPair, err error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } csd.openBlockFn = func(chainID eth.ChainID, blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) { @@ -70,8 +70,8 @@ func TestCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (pair types.DerivedBlockRefPair, err error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } csd.openBlockFn = func(chainID eth.ChainID, blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) { @@ -89,8 +89,8 @@ func TestCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (pair types.DerivedBlockRefPair, err error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } csd.openBlockFn = func(chainID eth.ChainID, blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) { @@ -100,7 +100,7 @@ func 
TestCrossSafeUpdate(t *testing.T) { csd.invalidateLocalSafeFn = func(id eth.ChainID, p types.DerivedBlockRefPair) error { require.Equal(t, chainID, id) require.Equal(t, candidate, p.Derived) - require.Equal(t, candidateScope, p.DerivedFrom) + require.Equal(t, candidateScope, p.Source) invalidated = true return nil } @@ -117,15 +117,15 @@ func TestCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } csd.openBlockFn = func(chainID eth.ChainID, blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) { return eth.BlockRef{}, 0, nil, types.ErrOutOfScope } newScope := eth.BlockRef{Number: 3} - csd.nextDerivedFromFn = func(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) { + csd.nextSourceFn = func(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) { return newScope, nil } currentCrossSafe := types.BlockSeal{Number: 5} @@ -148,7 +148,7 @@ func TestCrossSafeUpdate(t *testing.T) { } // when scopedCrossSafeUpdate returns Out of Scope error, // CrossSafeUpdate proceeds anyway and calls UpdateCrossSafe - // the update uses the new scope returned by NextDerivedFrom + // the update uses the new scope returned by NextSource // and a crossSafeRef made from the current crossSafe and its parent err := CrossSafeUpdate(logger, chainID, csd) require.NoError(t, err) @@ -157,7 +157,7 @@ func TestCrossSafeUpdate(t *testing.T) { crossSafeRef := currentCrossSafe.MustWithParent(parent.ID()) require.Equal(t, crossSafeRef, updatingCandidate) }) - t.Run("NextDerivedFrom returns error", func(t *testing.T) { + t.Run("NextSource returns error", func(t *testing.T) { logger := testlog.Logger(t, log.LevelDebug) chainID := eth.ChainIDFromUInt64(0) csd := 
&mockCrossSafeDeps{} @@ -165,19 +165,19 @@ func TestCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } csd.openBlockFn = func(chainID eth.ChainID, blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) { return eth.BlockRef{}, 0, nil, types.ErrOutOfScope } - csd.nextDerivedFromFn = func(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) { + csd.nextSourceFn = func(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) { return eth.BlockRef{}, errors.New("some error") } csd.deps = mockDependencySet{} // when scopedCrossSafeUpdate returns Out of Scope error, - // and NextDerivedFrom returns an error, + // and NextSource returns an error, // the error is returned err := CrossSafeUpdate(logger, chainID, csd) require.ErrorContains(t, err, "some error") @@ -190,8 +190,8 @@ func TestCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } csd.openBlockFn = func(chainID eth.ChainID, blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) { @@ -215,8 +215,8 @@ func TestCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } csd.openBlockFn = func(chainID eth.ChainID, blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs 
map[uint32]*types.ExecutingMessage, err error) { @@ -246,7 +246,7 @@ func TestScopedCrossSafeUpdate(t *testing.T) { // the error is returned candidate, err := scopedCrossSafeUpdate(logger, chainID, csd) require.ErrorContains(t, err, "some error") - require.Equal(t, eth.BlockRef{}, candidate.DerivedFrom) + require.Equal(t, eth.BlockRef{}, candidate.Source) }) t.Run("CandidateCrossSafe returns error", func(t *testing.T) { logger := testlog.Logger(t, log.LevelDebug) @@ -259,7 +259,7 @@ func TestScopedCrossSafeUpdate(t *testing.T) { // the error is returned pair, err := scopedCrossSafeUpdate(logger, chainID, csd) require.ErrorContains(t, err, "some error") - require.Equal(t, eth.BlockRef{}, pair.DerivedFrom) + require.Equal(t, eth.BlockRef{}, pair.Source) }) t.Run("candidate does not match opened block", func(t *testing.T) { logger := testlog.Logger(t, log.LevelDebug) @@ -268,8 +268,8 @@ func TestScopedCrossSafeUpdate(t *testing.T) { candidate := eth.BlockRef{Number: 1} csd.candidateCrossSafeFn = func() (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - DerivedFrom: eth.BlockRef{}, - Derived: candidate, + Source: eth.BlockRef{}, + Derived: candidate, }, nil } opened := eth.BlockRef{Number: 2} @@ -280,7 +280,7 @@ func TestScopedCrossSafeUpdate(t *testing.T) { // an ErrConflict is returned pair, err := scopedCrossSafeUpdate(logger, chainID, csd) require.ErrorIs(t, err, types.ErrConflict) - require.Equal(t, eth.BlockRef{}, pair.DerivedFrom) + require.Equal(t, eth.BlockRef{}, pair.Source) }) t.Run("CrossSafeHazards returns error", func(t *testing.T) { logger := testlog.Logger(t, log.LevelDebug) @@ -289,8 +289,8 @@ func TestScopedCrossSafeUpdate(t *testing.T) { candidate := eth.BlockRef{Number: 1} csd.candidateCrossSafeFn = func() (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - DerivedFrom: eth.BlockRef{}, - Derived: candidate, + Source: eth.BlockRef{}, + Derived: candidate, }, nil } opened := eth.BlockRef{Number: 1} @@ -308,7 
+308,7 @@ func TestScopedCrossSafeUpdate(t *testing.T) { pair, err := scopedCrossSafeUpdate(logger, chainID, csd) require.ErrorContains(t, err, "some error") require.ErrorContains(t, err, "dependencies of cross-safe candidate") - require.Equal(t, eth.BlockRef{}, pair.DerivedFrom) + require.Equal(t, eth.BlockRef{}, pair.Source) }) t.Run("HazardSafeFrontierChecks returns error", func(t *testing.T) { logger := testlog.Logger(t, log.LevelDebug) @@ -317,8 +317,8 @@ func TestScopedCrossSafeUpdate(t *testing.T) { candidate := eth.BlockRef{Number: 1} csd.candidateCrossSafeFn = func() (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - DerivedFrom: eth.BlockRef{}, - Derived: candidate, + Source: eth.BlockRef{}, + Derived: candidate, }, nil } opened := eth.BlockRef{Number: 1} @@ -345,7 +345,7 @@ func TestScopedCrossSafeUpdate(t *testing.T) { pair, err := scopedCrossSafeUpdate(logger, chainID, csd) require.ErrorContains(t, err, "some error") require.ErrorContains(t, err, "frontier") - require.Equal(t, eth.BlockRef{}, pair.DerivedFrom) + require.Equal(t, eth.BlockRef{}, pair.Source) }) t.Run("HazardCycleChecks returns error", func(t *testing.T) { logger := testlog.Logger(t, log.LevelDebug) @@ -355,8 +355,8 @@ func TestScopedCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } opened := eth.BlockRef{Number: 1, Time: 1} @@ -374,7 +374,7 @@ func TestScopedCrossSafeUpdate(t *testing.T) { pair, err := scopedCrossSafeUpdate(logger, chainID, csd) require.ErrorContains(t, err, "cycle detected") require.ErrorContains(t, err, "failed to verify block") - require.Equal(t, eth.BlockRef{Number: 2}, pair.DerivedFrom) + require.Equal(t, eth.BlockRef{Number: 2}, pair.Source) }) t.Run("UpdateCrossSafe returns error", func(t *testing.T) { 
logger := testlog.Logger(t, log.LevelDebug) @@ -384,8 +384,8 @@ func TestScopedCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } opened := eth.BlockRef{Number: 1} @@ -405,7 +405,7 @@ func TestScopedCrossSafeUpdate(t *testing.T) { pair, err := scopedCrossSafeUpdate(logger, chainID, csd) require.ErrorContains(t, err, "some error") require.ErrorContains(t, err, "failed to update") - require.Equal(t, eth.BlockRef{Number: 2}, pair.DerivedFrom) + require.Equal(t, eth.BlockRef{Number: 2}, pair.Source) }) t.Run("successful update", func(t *testing.T) { logger := testlog.Logger(t, log.LevelDebug) @@ -415,8 +415,8 @@ func TestScopedCrossSafeUpdate(t *testing.T) { candidateScope := eth.BlockRef{Number: 2} csd.candidateCrossSafeFn = func() (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - DerivedFrom: candidateScope, - Derived: candidate, + Source: candidateScope, + Derived: candidate, }, nil } opened := eth.BlockRef{Number: 1} @@ -444,7 +444,7 @@ func TestScopedCrossSafeUpdate(t *testing.T) { require.Equal(t, chainID, updatingChain) require.Equal(t, candidateScope, updatingCandidateScope) require.Equal(t, candidate, updatingCandidate) - require.Equal(t, candidateScope, pair.DerivedFrom) + require.Equal(t, candidateScope, pair.Source) require.NoError(t, err) }) } @@ -455,7 +455,7 @@ type mockCrossSafeDeps struct { candidateCrossSafeFn func() (candidate types.DerivedBlockRefPair, err error) openBlockFn func(chainID eth.ChainID, blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) updateCrossSafeFn func(chain eth.ChainID, l1View eth.BlockRef, lastCrossDerived eth.BlockRef) error - nextDerivedFromFn func(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, 
err error) + nextSourceFn func(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) previousDerivedFn func(chain eth.ChainID, derived eth.BlockID) (prevDerived types.BlockSeal, err error) checkFn func(chainID eth.ChainID, blockNum uint64, logIdx uint32, logHash common.Hash) (types.BlockSeal, error) invalidateLocalSafeFn func(chainID eth.ChainID, candidate types.DerivedBlockRefPair) error @@ -481,20 +481,20 @@ func (m *mockCrossSafeDeps) DependencySet() depset.DependencySet { return m.deps } -func (m *mockCrossSafeDeps) CrossDerivedFrom(chainID eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { +func (m *mockCrossSafeDeps) CrossDerivedToSource(chainID eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { return types.BlockSeal{}, nil } -func (m *mockCrossSafeDeps) Check(chainID eth.ChainID, blockNum uint64, timestamp uint64, logIdx uint32, logHash common.Hash) (types.BlockSeal, error) { +func (m *mockCrossSafeDeps) Contains(chainID eth.ChainID, q types.ContainsQuery) (types.BlockSeal, error) { if m.checkFn != nil { - return m.checkFn(chainID, blockNum, logIdx, logHash) + return m.checkFn(chainID, q.BlockNum, q.LogIdx, q.LogHash) } return types.BlockSeal{}, nil } -func (m *mockCrossSafeDeps) NextDerivedFrom(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) { - if m.nextDerivedFromFn != nil { - return m.nextDerivedFromFn(chain, derivedFrom) +func (m *mockCrossSafeDeps) NextSource(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) { + if m.nextSourceFn != nil { + return m.nextSourceFn(chain, derivedFrom) } return eth.BlockRef{}, nil } diff --git a/op-supervisor/supervisor/backend/cross/unsafe_start.go b/op-supervisor/supervisor/backend/cross/unsafe_start.go index b6ab931c501..8a57d9cd197 100644 --- a/op-supervisor/supervisor/backend/cross/unsafe_start.go +++ b/op-supervisor/supervisor/backend/cross/unsafe_start.go @@ -4,15 +4,13 @@ import ( 
"errors" "fmt" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) type UnsafeStartDeps interface { - Check(chain eth.ChainID, blockNum uint64, timestamp uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) + Contains(chain eth.ChainID, query types.ContainsQuery) (includedIn types.BlockSeal, err error) IsCrossUnsafe(chainID eth.ChainID, block eth.BlockID) error @@ -61,7 +59,13 @@ func CrossUnsafeHazards(d UnsafeStartDeps, chainID eth.ChainID, if msg.Timestamp < candidate.Timestamp { // If timestamp is older: invariant ensures non-cyclic ordering relative to other messages. // Check that the block that they are included in is cross-safe already. - includedIn, err := d.Check(initChainID, msg.BlockNum, msg.Timestamp, msg.LogIdx, msg.Hash) + includedIn, err := d.Contains(initChainID, + types.ContainsQuery{ + Timestamp: msg.Timestamp, + BlockNum: msg.BlockNum, + LogIdx: msg.LogIdx, + LogHash: msg.Hash, + }) if err != nil { return nil, fmt.Errorf("executing msg %s failed check: %w", msg, err) } @@ -78,7 +82,13 @@ func CrossUnsafeHazards(d UnsafeStartDeps, chainID eth.ChainID, // Thus check that it was included in a local-unsafe block, // and then proceed with transitive block checks, // to ensure the local block we depend on is becoming cross-unsafe also. 
- includedIn, err := d.Check(initChainID, msg.BlockNum, msg.Timestamp, msg.LogIdx, msg.Hash) + includedIn, err := d.Contains(initChainID, + types.ContainsQuery{ + Timestamp: msg.Timestamp, + BlockNum: msg.BlockNum, + LogIdx: msg.LogIdx, + LogHash: msg.Hash, + }) if err != nil { return nil, fmt.Errorf("executing msg %s failed check: %w", msg, err) } diff --git a/op-supervisor/supervisor/backend/cross/unsafe_start_test.go b/op-supervisor/supervisor/backend/cross/unsafe_start_test.go index 70cb6bc4e6d..e12d7c14fa4 100644 --- a/op-supervisor/supervisor/backend/cross/unsafe_start_test.go +++ b/op-supervisor/supervisor/backend/cross/unsafe_start_test.go @@ -261,7 +261,7 @@ type mockUnsafeStartDeps struct { isCrossUnsafeFn func() error } -func (m *mockUnsafeStartDeps) Check(chain eth.ChainID, blockNum uint64, timestamp uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) { +func (m *mockUnsafeStartDeps) Contains(chain eth.ChainID, q types.ContainsQuery) (includedIn types.BlockSeal, err error) { if m.checkFn != nil { return m.checkFn() } diff --git a/op-supervisor/supervisor/backend/cross/unsafe_update_test.go b/op-supervisor/supervisor/backend/cross/unsafe_update_test.go index 5a0403b3cf7..f030b9712cd 100644 --- a/op-supervisor/supervisor/backend/cross/unsafe_update_test.go +++ b/op-supervisor/supervisor/backend/cross/unsafe_update_test.go @@ -198,9 +198,9 @@ func (m *mockCrossUnsafeDeps) DependencySet() depset.DependencySet { return m.deps } -func (m *mockCrossUnsafeDeps) Check(chainID eth.ChainID, blockNum uint64, timestamp uint64, logIdx uint32, logHash common.Hash) (types.BlockSeal, error) { +func (m *mockCrossUnsafeDeps) Contains(chainID eth.ChainID, q types.ContainsQuery) (types.BlockSeal, error) { if m.checkFn != nil { - return m.checkFn(chainID, blockNum, timestamp, logIdx, logHash) + return m.checkFn(chainID, q.BlockNum, q.Timestamp, q.LogIdx, q.LogHash) } return types.BlockSeal{}, nil } diff --git 
a/op-supervisor/supervisor/backend/db/anchor.go b/op-supervisor/supervisor/backend/db/anchor.go index d58b86c9744..d7d8375d93e 100644 --- a/op-supervisor/supervisor/backend/db/anchor.go +++ b/op-supervisor/supervisor/backend/db/anchor.go @@ -13,10 +13,10 @@ func (db *ChainsDB) maybeInitSafeDB(id eth.ChainID, anchor types.DerivedBlockRef _, err := db.LocalSafe(id) if errors.Is(err, types.ErrFuture) { db.logger.Debug("initializing chain database", "chain", id) - if err := db.UpdateCrossSafe(id, anchor.DerivedFrom, anchor.Derived); err != nil { + if err := db.UpdateCrossSafe(id, anchor.Source, anchor.Derived); err != nil { db.logger.Warn("failed to initialize cross safe", "chain", id, "error", err) } - db.UpdateLocalSafe(id, anchor.DerivedFrom, anchor.Derived) + db.UpdateLocalSafe(id, anchor.Source, anchor.Derived) } else if err != nil { db.logger.Warn("failed to check if chain database is initialized", "chain", id, "error", err) } else { diff --git a/op-supervisor/supervisor/backend/db/db.go b/op-supervisor/supervisor/backend/db/db.go index 6a424cad346..f62025ecec8 100644 --- a/op-supervisor/supervisor/backend/db/db.go +++ b/op-supervisor/supervisor/backend/db/db.go @@ -44,37 +44,43 @@ type LogStorage interface { // This can be used to check the validity of cross-chain interop events. // The block-seal of the blockNum block, that the log was included in, is returned. // This seal may be fully zeroed, without error, if the block isn't fully known yet. 
- Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) + Contains(types.ContainsQuery) (includedIn types.BlockSeal, err error) // OpenBlock accumulates the ExecutingMessage events for a block and returns them OpenBlock(blockNum uint64) (ref eth.BlockRef, logCount uint32, execMsgs map[uint32]*types.ExecutingMessage, err error) } -type LocalDerivedFromStorage interface { +type DerivationStorage interface { + // basic info First() (pair types.DerivedBlockSealPair, err error) - Latest() (pair types.DerivedBlockSealPair, err error) + Last() (pair types.DerivedBlockSealPair, err error) + + // mapping from source<>derived + DerivedToFirstSource(derived eth.BlockID) (source types.BlockSeal, err error) + SourceToLastDerived(source eth.BlockID) (derived types.BlockSeal, err error) + + // traversal + Next(pair types.DerivedIDPair) (next types.DerivedBlockSealPair, err error) + NextSource(source eth.BlockID) (nextSource types.BlockSeal, err error) + NextDerived(derived eth.BlockID) (next types.DerivedBlockSealPair, err error) + PreviousSource(source eth.BlockID) (prevSource types.BlockSeal, err error) + PreviousDerived(derived eth.BlockID) (prevDerived types.BlockSeal, err error) + + // type-specific Invalidated() (pair types.DerivedBlockSealPair, err error) - AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error + ContainsDerived(derived eth.BlockID) error + + // writing + AddDerived(source eth.BlockRef, derived eth.BlockRef) error ReplaceInvalidatedBlock(replacementDerived eth.BlockRef, invalidated common.Hash) (types.DerivedBlockSealPair, error) + + // rewining RewindAndInvalidate(invalidated types.DerivedBlockRefPair) error - LastDerivedAt(derivedFrom eth.BlockID) (derived types.BlockSeal, err error) - IsDerived(derived eth.BlockID) error - DerivedFrom(derived eth.BlockID) (derivedFrom types.BlockSeal, err error) - FirstAfter(derivedFrom, derived eth.BlockID) (next types.DerivedBlockSealPair, err error) - 
NextDerivedFrom(derivedFrom eth.BlockID) (nextDerivedFrom types.BlockSeal, err error) - NextDerived(derived eth.BlockID) (next types.DerivedBlockSealPair, err error) - PreviousDerivedFrom(derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) - PreviousDerived(derived eth.BlockID) (prevDerived types.BlockSeal, err error) RewindToScope(scope eth.BlockID) error RewindToFirstDerived(v eth.BlockID) error } -var _ LocalDerivedFromStorage = (*fromda.DB)(nil) - -type CrossDerivedFromStorage interface { - LocalDerivedFromStorage - // This will start to differ with reorg support -} +var _ DerivationStorage = (*fromda.DB)(nil) var _ LogStorage = (*logs.DB)(nil) @@ -89,10 +95,10 @@ type ChainsDB struct { crossUnsafe locks.RWMap[eth.ChainID, *locks.RWValue[types.BlockSeal]] // local-safe: index of what we optimistically know about L2 blocks being derived from L1 - localDBs locks.RWMap[eth.ChainID, LocalDerivedFromStorage] + localDBs locks.RWMap[eth.ChainID, DerivationStorage] // cross-safe: index of L2 blocks we know to only have cross-L2 valid dependencies - crossDBs locks.RWMap[eth.ChainID, CrossDerivedFromStorage] + crossDBs locks.RWMap[eth.ChainID, DerivationStorage] // finalized: the L1 finality progress. This can be translated into what may be considered as finalized in L2. 
// It is initially zeroed, and the L2 finality query will return @@ -128,7 +134,7 @@ func (db *ChainsDB) OnEvent(ev event.Event) bool { db.maybeInitEventsDB(x.ChainID, x.Anchor) db.maybeInitSafeDB(x.ChainID, x.Anchor) case superevents.LocalDerivedEvent: - db.UpdateLocalSafe(x.ChainID, x.Derived.DerivedFrom, x.Derived.Derived) + db.UpdateLocalSafe(x.ChainID, x.Derived.Source, x.Derived.Derived) case superevents.FinalizedL1RequestEvent: db.onFinalizedL1(x.FinalizedL1) case superevents.ReplaceBlockEvent: @@ -147,7 +153,7 @@ func (db *ChainsDB) AddLogDB(chainID eth.ChainID, logDB LogStorage) { db.logDBs.Set(chainID, logDB) } -func (db *ChainsDB) AddLocalDerivedFromDB(chainID eth.ChainID, dfDB LocalDerivedFromStorage) { +func (db *ChainsDB) AddLocalDerivationDB(chainID eth.ChainID, dfDB DerivationStorage) { if db.localDBs.Has(chainID) { db.logger.Warn("overwriting existing local derived-from DB for chain", "chain", chainID) } @@ -155,7 +161,7 @@ func (db *ChainsDB) AddLocalDerivedFromDB(chainID eth.ChainID, dfDB LocalDerived db.localDBs.Set(chainID, dfDB) } -func (db *ChainsDB) AddCrossDerivedFromDB(chainID eth.ChainID, dfDB CrossDerivedFromStorage) { +func (db *ChainsDB) AddCrossDerivationDB(chainID eth.ChainID, dfDB DerivationStorage) { if db.crossDBs.Has(chainID) { db.logger.Warn("overwriting existing cross derived-from DB for chain", "chain", chainID) } diff --git a/op-supervisor/supervisor/backend/db/file_layout.go b/op-supervisor/supervisor/backend/db/file_layout.go index 7d97652124f..aad9991fee6 100644 --- a/op-supervisor/supervisor/backend/db/file_layout.go +++ b/op-supervisor/supervisor/backend/db/file_layout.go @@ -8,7 +8,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" ) -func prepLocalDerivedFromDBPath(chainID eth.ChainID, datadir string) (string, error) { +func prepLocalDerivationDBPath(chainID eth.ChainID, datadir string) (string, error) { dir, err := prepChainDir(chainID, datadir) if err != nil { return "", err @@ -16,7 +16,7 @@ func 
prepLocalDerivedFromDBPath(chainID eth.ChainID, datadir string) (string, er return filepath.Join(dir, "local_safe.db"), nil } -func prepCrossDerivedFromDBPath(chainID eth.ChainID, datadir string) (string, error) { +func prepCrossDerivationDBPath(chainID eth.ChainID, datadir string) (string, error) { dir, err := prepChainDir(chainID, datadir) if err != nil { return "", err diff --git a/op-supervisor/supervisor/backend/db/fromda/db.go b/op-supervisor/supervisor/backend/db/fromda/db.go index c16223cfa2c..6dbf9686bcb 100644 --- a/op-supervisor/supervisor/backend/db/fromda/db.go +++ b/op-supervisor/supervisor/backend/db/fromda/db.go @@ -70,12 +70,12 @@ func (db *DB) PreviousDerived(derived eth.BlockID) (prevDerived types.BlockSeal, db.rwLock.RLock() defer db.rwLock.RUnlock() // last is always the latest view, and thus canonical. - _, lastCanonical, err := db.lastDerivedFrom(derived.Number) + _, lastCanonical, err := db.derivedNumToLastSource(derived.Number) if err != nil { return types.BlockSeal{}, fmt.Errorf("failed to find last derived %d: %w", derived.Number, err) } // get the first time this L2 block was seen. - selfIndex, self, err := db.firstDerivedFrom(derived.Number) + selfIndex, self, err := db.derivedNumToFirstSource(derived.Number) if err != nil { return types.BlockSeal{}, fmt.Errorf("failed to find first derived %d: %w", derived.Number, err) } @@ -98,7 +98,7 @@ func (db *DB) PreviousDerived(derived eth.BlockID) (prevDerived types.BlockSeal, // derivedFrom: the L1 block that the L2 block is safe for (not necessarily the first, multiple L2 blocks may be derived from the same L1 block). // derived: the L2 block that was derived (not necessarily the first, the L1 block may have been empty and repeated the last safe L2 block). // If the last entry is invalidated, this returns a types.ErrAwaitReplacementBlock error. 
-func (db *DB) Latest() (pair types.DerivedBlockSealPair, err error) { +func (db *DB) Last() (pair types.DerivedBlockSealPair, err error) { db.rwLock.RLock() defer db.rwLock.RUnlock() link, err := db.latest() @@ -108,6 +108,19 @@ func (db *DB) Latest() (pair types.DerivedBlockSealPair, err error) { return link.sealOrErr() } +// latest is like Latest, but without lock, for internal use. +func (db *DB) latest() (link LinkEntry, err error) { + lastIndex := db.store.LastEntryIdx() + if lastIndex < 0 { + return LinkEntry{}, types.ErrFuture + } + last, err := db.readAt(lastIndex) + if err != nil { + return LinkEntry{}, fmt.Errorf("failed to read last derivation data: %w", err) + } + return last, nil +} + func (db *DB) Invalidated() (pair types.DerivedBlockSealPair, err error) { db.rwLock.RLock() defer db.rwLock.RUnlock() @@ -119,36 +132,23 @@ func (db *DB) Invalidated() (pair types.DerivedBlockSealPair, err error) { return types.DerivedBlockSealPair{}, fmt.Errorf("last entry %s is not invalidated: %w", link, types.ErrConflict) } return types.DerivedBlockSealPair{ - DerivedFrom: link.derivedFrom, - Derived: link.derived, + Source: link.source, + Derived: link.derived, }, nil } -// latest is like Latest, but without lock, for internal use. -func (db *DB) latest() (link LinkEntry, err error) { - lastIndex := db.store.LastEntryIdx() - if lastIndex < 0 { - return LinkEntry{}, types.ErrFuture - } - last, err := db.readAt(lastIndex) - if err != nil { - return LinkEntry{}, fmt.Errorf("failed to read last derivation data: %w", err) - } - return last, nil -} - // LastDerivedAt returns the last L2 block derived from the given L1 block. // This may return types.ErrAwaitReplacementBlock if the entry was invalidated and needs replacement. 
-func (db *DB) LastDerivedAt(derivedFrom eth.BlockID) (derived types.BlockSeal, err error) { +func (db *DB) SourceToLastDerived(source eth.BlockID) (derived types.BlockSeal, err error) { db.rwLock.RLock() defer db.rwLock.RUnlock() - _, link, err := db.lastDerivedAt(derivedFrom.Number) + _, link, err := db.sourceNumToLastDerived(source.Number) if err != nil { return types.BlockSeal{}, err } - if link.derivedFrom.ID() != derivedFrom { + if link.source.ID() != source { return types.BlockSeal{}, fmt.Errorf("searched for last derived-from %s but found %s: %w", - derivedFrom, link.derivedFrom, types.ErrConflict) + source, link.source, types.ErrConflict) } if link.invalidated { return types.BlockSeal{}, types.ErrAwaitReplacementBlock @@ -162,7 +162,7 @@ func (db *DB) NextDerived(derived eth.BlockID) (pair types.DerivedBlockSealPair, db.rwLock.RLock() defer db.rwLock.RUnlock() // get the last time this L2 block was seen. - selfIndex, self, err := db.lastDerivedFrom(derived.Number) + selfIndex, self, err := db.derivedNumToLastSource(derived.Number) if err != nil { return types.DerivedBlockSealPair{}, fmt.Errorf("failed to find derived %d: %w", derived.Number, err) } @@ -176,16 +176,16 @@ func (db *DB) NextDerived(derived eth.BlockID) (pair types.DerivedBlockSealPair, return next.sealOrErr() } -// IsDerived checks if the given block is the canonical block at the given chain. +// ContainsDerived checks if the given block is canonical for the given chain. // This returns an ErrFuture if the block is not known yet. // An ErrConflict if there is a different block. // Or an ErrAwaitReplacementBlock if it was invalidated. -func (db *DB) IsDerived(derived eth.BlockID) error { +func (db *DB) ContainsDerived(derived eth.BlockID) error { db.rwLock.RLock() defer db.rwLock.RUnlock() // Take the last entry: this will be the latest canonical view, // if the block was previously invalidated. 
- _, link, err := db.lastDerivedFrom(derived.Number) + _, link, err := db.derivedNumToLastSource(derived.Number) if err != nil { return err } @@ -199,12 +199,12 @@ func (db *DB) IsDerived(derived eth.BlockID) error { return nil } -// DerivedFrom determines where a L2 block was first derived from. +// DerivedToFirstSource determines where a L2 block was first derived from. // (a L2 block may repeat if the following L1 blocks are empty and don't produce additional L2 blocks) -func (db *DB) DerivedFrom(derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { +func (db *DB) DerivedToFirstSource(derived eth.BlockID) (types.BlockSeal, error) { db.rwLock.RLock() defer db.rwLock.RUnlock() - _, link, err := db.firstDerivedFrom(derived.Number) + _, link, err := db.derivedNumToFirstSource(derived.Number) if err != nil { return types.BlockSeal{}, err } @@ -212,73 +212,72 @@ func (db *DB) DerivedFrom(derived eth.BlockID) (derivedFrom types.BlockSeal, err return types.BlockSeal{}, fmt.Errorf("searched for first derived %s but found %s: %w", derived, link.derived, types.ErrConflict) } - return link.derivedFrom, nil + return link.source, nil } -func (db *DB) PreviousDerivedFrom(derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) { +func (db *DB) PreviousSource(source eth.BlockID) (types.BlockSeal, error) { db.rwLock.RLock() defer db.rwLock.RUnlock() - return db.previousDerivedFrom(derivedFrom) + return db.previousSource(source) } -func (db *DB) previousDerivedFrom(derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) { +func (db *DB) previousSource(source eth.BlockID) (types.BlockSeal, error) { // get the last time this L1 block was seen. 
- selfIndex, self, err := db.firstDerivedAt(derivedFrom.Number) + selfIndex, self, err := db.sourceNumToFirstDerived(source.Number) if err != nil { - return types.BlockSeal{}, fmt.Errorf("failed to find derived %d: %w", derivedFrom.Number, err) + return types.BlockSeal{}, fmt.Errorf("failed to find derived %d: %w", source.Number, err) } - if self.derivedFrom.ID() != derivedFrom { - return types.BlockSeal{}, fmt.Errorf("found %s, but expected %s: %w", self.derivedFrom, derivedFrom, types.ErrConflict) + if self.source.ID() != source { + return types.BlockSeal{}, fmt.Errorf("found %s, but expected %s: %w", self.source, source, types.ErrConflict) } if selfIndex == 0 { // genesis block has a zeroed block as parent block - if self.derivedFrom.Number == 0 { + if self.source.Number == 0 { return types.BlockSeal{}, nil } else { return types.BlockSeal{}, - fmt.Errorf("cannot find previous derived before start of database: %s (%w)", derivedFrom, types.ErrPreviousToFirst) + fmt.Errorf("cannot find previous derived before start of database: %s (%w)", source, types.ErrPreviousToFirst) } } prev, err := db.readAt(selfIndex - 1) if err != nil { - return types.BlockSeal{}, fmt.Errorf("cannot find previous derived before %s: %w", derivedFrom, err) + return types.BlockSeal{}, fmt.Errorf("cannot find previous derived before %s: %w", source, err) } - return prev.derivedFrom, nil + return prev.source, nil } -// NextDerivedFrom finds the next L1 block after derivedFrom -func (db *DB) NextDerivedFrom(derivedFrom eth.BlockID) (nextDerivedFrom types.BlockSeal, err error) { +// NextSource finds the next source after the given source +func (db *DB) NextSource(source eth.BlockID) (types.BlockSeal, error) { db.rwLock.RLock() defer db.rwLock.RUnlock() - selfIndex, self, err := db.lastDerivedAt(derivedFrom.Number) + selfIndex, self, err := db.sourceNumToLastDerived(source.Number) if err != nil { - return types.BlockSeal{}, fmt.Errorf("failed to find derived-from %d: %w", derivedFrom.Number, err) + 
return types.BlockSeal{}, fmt.Errorf("failed to find derived-from %d: %w", source.Number, err) } - if self.derivedFrom.ID() != derivedFrom { - return types.BlockSeal{}, fmt.Errorf("found %s, but expected %s: %w", self.derivedFrom, derivedFrom, types.ErrConflict) + if self.source.ID() != source { + return types.BlockSeal{}, fmt.Errorf("found %s, but expected %s: %w", self.source, source, types.ErrConflict) } next, err := db.readAt(selfIndex + 1) if err != nil { - return types.BlockSeal{}, fmt.Errorf("cannot find next derived-from after %s: %w", derivedFrom, err) + return types.BlockSeal{}, fmt.Errorf("cannot find next derived-from after %s: %w", source, err) } - return next.derivedFrom, nil + return next.source, nil } -// FirstAfter determines the next entry after the given pair of derivedFrom, derived. -// Either one or both of the two entries will be an increment by 1. +// Next returns the next Derived Block Pair after the given pair. // This may return types.ErrAwaitReplacementBlock if the entry was invalidated and needs replacement. 
-func (db *DB) FirstAfter(derivedFrom, derived eth.BlockID) (pair types.DerivedBlockSealPair, err error) { +func (db *DB) Next(pair types.DerivedIDPair) (types.DerivedBlockSealPair, error) { db.rwLock.RLock() defer db.rwLock.RUnlock() - selfIndex, selfLink, err := db.lookup(derivedFrom.Number, derived.Number) + selfIndex, selfLink, err := db.lookup(pair.Source.Number, pair.Derived.Number) if err != nil { return types.DerivedBlockSealPair{}, err } - if selfLink.derivedFrom.ID() != derivedFrom { - return types.DerivedBlockSealPair{}, fmt.Errorf("DB has derived-from %s but expected %s: %w", selfLink.derivedFrom, derivedFrom, types.ErrConflict) + if selfLink.source.ID() != pair.Source { + return types.DerivedBlockSealPair{}, fmt.Errorf("DB has derived-from %s but expected %s: %w", selfLink.source, pair.Source, types.ErrConflict) } - if selfLink.derived.ID() != derived { - return types.DerivedBlockSealPair{}, fmt.Errorf("DB has derived %s but expected %s: %w", selfLink.derived, derived, types.ErrConflict) + if selfLink.derived.ID() != pair.Derived { + return types.DerivedBlockSealPair{}, fmt.Errorf("DB has derived %s but expected %s: %w", selfLink.derived, pair.Derived, types.ErrConflict) } next, err := db.readAt(selfIndex + 1) if err != nil { @@ -287,38 +286,41 @@ func (db *DB) FirstAfter(derivedFrom, derived eth.BlockID) (pair types.DerivedBl return next.sealOrErr() } -func (db *DB) lastDerivedFrom(derived uint64) (entrydb.EntryIdx, LinkEntry, error) { - return db.find(true, func(link LinkEntry) int { - return cmp.Compare(derived, link.derived.Number) +func (db *DB) derivedNumToFirstSource(derivedNum uint64) (entrydb.EntryIdx, LinkEntry, error) { + // Forward: prioritize the first entry. 
+ return db.find(false, func(link LinkEntry) int { + return cmp.Compare(link.derived.Number, derivedNum) }) } -func (db *DB) firstDerivedFrom(derived uint64) (entrydb.EntryIdx, LinkEntry, error) { - return db.find(false, func(link LinkEntry) int { - return cmp.Compare(link.derived.Number, derived) +func (db *DB) derivedNumToLastSource(derivedNum uint64) (entrydb.EntryIdx, LinkEntry, error) { + // Reverse: prioritize the last entry. + return db.find(true, func(link LinkEntry) int { + return cmp.Compare(derivedNum, link.derived.Number) }) } -func (db *DB) lookup(derivedFrom, derived uint64) (entrydb.EntryIdx, LinkEntry, error) { +func (db *DB) sourceNumToFirstDerived(sourceNum uint64) (entrydb.EntryIdx, LinkEntry, error) { + // Forward: prioritize the first entry. return db.find(false, func(link LinkEntry) int { - res := cmp.Compare(link.derived.Number, derived) - if res == 0 { - return cmp.Compare(link.derivedFrom.Number, derivedFrom) - } - return res + return cmp.Compare(link.source.Number, sourceNum) }) } -func (db *DB) lastDerivedAt(derivedFrom uint64) (entrydb.EntryIdx, LinkEntry, error) { +func (db *DB) sourceNumToLastDerived(sourceNum uint64) (entrydb.EntryIdx, LinkEntry, error) { // Reverse: prioritize the last entry. 
return db.find(true, func(link LinkEntry) int { - return cmp.Compare(derivedFrom, link.derivedFrom.Number) + return cmp.Compare(sourceNum, link.source.Number) }) } -func (db *DB) firstDerivedAt(derivedFrom uint64) (entrydb.EntryIdx, LinkEntry, error) { +func (db *DB) lookup(derivedFrom, derived uint64) (entrydb.EntryIdx, LinkEntry, error) { return db.find(false, func(link LinkEntry) int { - return cmp.Compare(link.derivedFrom.Number, derivedFrom) + res := cmp.Compare(link.derived.Number, derived) + if res == 0 { + return cmp.Compare(link.source.Number, derivedFrom) + } + return res }) } diff --git a/op-supervisor/supervisor/backend/db/fromda/db_invariants_test.go b/op-supervisor/supervisor/backend/db/fromda/db_invariants_test.go index abde003c245..a4bdaa9c489 100644 --- a/op-supervisor/supervisor/backend/db/fromda/db_invariants_test.go +++ b/op-supervisor/supervisor/backend/db/fromda/db_invariants_test.go @@ -43,7 +43,7 @@ func checkDBInvariants(t *testing.T, dbPath string, m *stubMetrics) { linkInvariants := []linkInvariant{ invariantDerivedTimestamp, - invariantDerivedFromTimestamp, + invariantSourceTimestamp, invariantNumberIncrement, } for i, link := range links { @@ -94,25 +94,25 @@ func invariantNumberIncrement(prev, current LinkEntry) error { // derived stays the same if the new L1 block is empty. derivedSame := current.derived.Number == prev.derived.Number // derivedFrom stays the same if this L2 block is derived from the same L1 block as the last L2 block - derivedFromSame := current.derivedFrom.Number == prev.derivedFrom.Number + derivedFromSame := current.source.Number == prev.source.Number // At least one of the two must increment, otherwise we are just repeating data in the DB. 
if derivedSame && derivedFromSame { return fmt.Errorf("expected at least either derivedFrom or derived to increment, but both have same number") } derivedIncrement := current.derived.Number == prev.derived.Number+1 - derivedFromIncrement := current.derivedFrom.Number == prev.derivedFrom.Number+1 + derivedFromIncrement := current.source.Number == prev.source.Number+1 if !(derivedSame || derivedIncrement) { return fmt.Errorf("expected derived to either stay the same or increment, got prev %s current %s", prev.derived, current.derived) } if !(derivedFromSame || derivedFromIncrement) { - return fmt.Errorf("expected derivedFrom to either stay the same or increment, got prev %s current %s", prev.derivedFrom, current.derivedFrom) + return fmt.Errorf("expected derivedFrom to either stay the same or increment, got prev %s current %s", prev.source, current.source) } return nil } -func invariantDerivedFromTimestamp(prev, current LinkEntry) error { - if current.derivedFrom.Timestamp < prev.derivedFrom.Timestamp { - return fmt.Errorf("derivedFrom timestamp must be >=, current: %s, prev: %s", current.derivedFrom, prev.derivedFrom) +func invariantSourceTimestamp(prev, current LinkEntry) error { + if current.source.Timestamp < prev.source.Timestamp { + return fmt.Errorf("source timestamp must be >=, current: %s, prev: %s", current.source, prev.source) } return nil } diff --git a/op-supervisor/supervisor/backend/db/fromda/db_test.go b/op-supervisor/supervisor/backend/db/fromda/db_test.go index 2175dd73afd..a283b07ed77 100644 --- a/op-supervisor/supervisor/backend/db/fromda/db_test.go +++ b/op-supervisor/supervisor/backend/db/fromda/db_test.go @@ -72,16 +72,16 @@ func TestEmptyDB(t *testing.T) { runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {}, func(t *testing.T, db *DB, m *stubMetrics) { - _, err := db.Latest() + _, err := db.Last() require.ErrorIs(t, err, types.ErrFuture) _, err = db.First() require.ErrorIs(t, err, types.ErrFuture) - _, err = 
db.LastDerivedAt(eth.BlockID{}) + _, err = db.SourceToLastDerived(eth.BlockID{}) require.ErrorIs(t, err, types.ErrFuture) - _, err = db.DerivedFrom(eth.BlockID{}) + _, err = db.DerivedToFirstSource(eth.BlockID{}) require.ErrorIs(t, err, types.ErrFuture) _, err = db.PreviousDerived(eth.BlockID{}) @@ -90,13 +90,15 @@ func TestEmptyDB(t *testing.T) { _, err = db.NextDerived(eth.BlockID{}) require.ErrorIs(t, err, types.ErrFuture) - _, err = db.PreviousDerivedFrom(eth.BlockID{}) + _, err = db.PreviousSource(eth.BlockID{}) require.ErrorIs(t, err, types.ErrFuture) - _, err = db.NextDerivedFrom(eth.BlockID{}) + _, err = db.NextSource(eth.BlockID{}) require.ErrorIs(t, err, types.ErrFuture) - _, err = db.FirstAfter(eth.BlockID{}, eth.BlockID{}) + _, err = db.Next(types.DerivedIDPair{ + Source: eth.BlockID{}, + Derived: eth.BlockID{}}) require.ErrorIs(t, err, types.ErrFuture) }) } @@ -131,51 +133,57 @@ func toRef(seal types.BlockSeal, parentHash common.Hash) eth.BlockRef { } func TestSingleEntryDB(t *testing.T) { - expectedDerivedFrom := mockL1(0) + expectedSource := mockL1(0) expectedDerived := mockL2(2) runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { - require.NoError(t, db.AddDerived(toRef(expectedDerivedFrom, mockL1(0).Hash), toRef(expectedDerived, mockL2(0).Hash))) + require.NoError(t, db.AddDerived(toRef(expectedSource, mockL1(0).Hash), toRef(expectedDerived, mockL2(0).Hash))) }, func(t *testing.T, db *DB, m *stubMetrics) { // First pair, err := db.First() require.NoError(t, err) - require.Equal(t, expectedDerivedFrom, pair.DerivedFrom) + require.Equal(t, expectedSource, pair.Source) require.Equal(t, expectedDerived, pair.Derived) - // Latest - pair, err = db.Latest() + // Last + pair, err = db.Last() require.NoError(t, err) - require.Equal(t, expectedDerivedFrom, pair.DerivedFrom) + require.Equal(t, expectedSource, pair.Source) require.Equal(t, expectedDerived, pair.Derived) - // FirstAfter Latest - _, err = db.FirstAfter(pair.DerivedFrom.ID(), 
pair.Derived.ID()) + // Next after Last + _, err = db.Next(types.DerivedIDPair{ + Source: pair.Source.ID(), + Derived: pair.Derived.ID()}) require.ErrorIs(t, err, types.ErrFuture) - // LastDerivedAt - derived, err := db.LastDerivedAt(expectedDerivedFrom.ID()) + // Last Derived + derived, err := db.SourceToLastDerived(expectedSource.ID()) require.NoError(t, err) require.Equal(t, expectedDerived, derived) - // LastDerivedAt with a non-existent block - _, err = db.LastDerivedAt(eth.BlockID{Hash: common.Hash{0xaa}, Number: expectedDerivedFrom.Number}) + // Last Derived with a non-existent Source + _, err = db.SourceToLastDerived(eth.BlockID{Hash: common.Hash{0xaa}, Number: expectedSource.Number}) require.ErrorIs(t, err, types.ErrConflict) - // FirstAfter with a non-existent block (derived and derivedFrom) - _, err = db.FirstAfter(eth.BlockID{Hash: common.Hash{0xaa}, Number: expectedDerivedFrom.Number}, expectedDerived.ID()) + // Next with a non-existent block (derived and derivedFrom) + _, err = db.Next(types.DerivedIDPair{ + Source: eth.BlockID{Hash: common.Hash{0xaa}, Number: expectedSource.Number}, + Derived: expectedDerived.ID()}) require.ErrorIs(t, err, types.ErrConflict) - _, err = db.FirstAfter(expectedDerivedFrom.ID(), eth.BlockID{Hash: common.Hash{0xaa}, Number: expectedDerived.Number}) + _, err = db.Next(types.DerivedIDPair{ + Source: expectedSource.ID(), + Derived: eth.BlockID{Hash: common.Hash{0xaa}, Number: expectedDerived.Number}}) require.ErrorIs(t, err, types.ErrConflict) - // DerivedFrom - derivedFrom, err := db.DerivedFrom(expectedDerived.ID()) + // First Source + derivedFrom, err := db.DerivedToFirstSource(expectedDerived.ID()) require.NoError(t, err) - require.Equal(t, expectedDerivedFrom, derivedFrom) + require.Equal(t, expectedSource, derivedFrom) - // DerivedFrom with a non-existent block - _, err = db.DerivedFrom(eth.BlockID{Hash: common.Hash{0xbb}, Number: expectedDerived.Number}) + // Source with a non-existent Derived + _, err = 
db.DerivedToFirstSource(eth.BlockID{Hash: common.Hash{0xbb}, Number: expectedDerived.Number}) require.ErrorIs(t, err, types.ErrConflict) // PreviousDerived @@ -183,8 +191,8 @@ func TestSingleEntryDB(t *testing.T) { require.NoError(t, err) require.Equal(t, types.BlockSeal{}, prev, "zeroed seal before first entry") - // PreviousDerivedFrom - prev, err = db.PreviousDerivedFrom(expectedDerivedFrom.ID()) + // PreviousSource + prev, err = db.PreviousSource(expectedSource.ID()) require.NoError(t, err) require.Equal(t, types.BlockSeal{}, prev, "zeroed seal before first entry") @@ -192,30 +200,32 @@ func TestSingleEntryDB(t *testing.T) { _, err = db.NextDerived(expectedDerived.ID()) require.ErrorIs(t, err, types.ErrFuture) - // NextDerivedFrom - _, err = db.NextDerivedFrom(expectedDerivedFrom.ID()) + // NextSource + _, err = db.NextSource(expectedSource.ID()) require.ErrorIs(t, err, types.ErrFuture) - // FirstAfter - _, err = db.FirstAfter(expectedDerivedFrom.ID(), expectedDerived.ID()) + // Next + _, err = db.Next(types.DerivedIDPair{ + Source: expectedSource.ID(), + Derived: expectedDerived.ID()}) require.ErrorIs(t, err, types.ErrFuture) }) } func TestGap(t *testing.T) { // mockL1 starts at block 1 to produce a gap - expectedDerivedFrom := mockL1(1) + expectedSource := mockL1(1) // mockL2 starts at block 2 to produce a gap expectedDerived := mockL2(2) runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { - require.NoError(t, db.AddDerived(toRef(expectedDerivedFrom, mockL1(0).Hash), toRef(expectedDerived, mockL2(0).Hash))) + require.NoError(t, db.AddDerived(toRef(expectedSource, mockL1(0).Hash), toRef(expectedDerived, mockL2(0).Hash))) }, func(t *testing.T, db *DB, m *stubMetrics) { _, err := db.NextDerived(mockL2(0).ID()) require.ErrorIs(t, err, types.ErrSkipped) - _, err = db.NextDerivedFrom(mockL1(0).ID()) + _, err = db.NextSource(mockL1(0).ID()) require.ErrorIs(t, err, types.ErrSkipped) }) } @@ -235,43 +245,43 @@ func TestThreeEntryDB(t *testing.T) { 
require.NoError(t, db.AddDerived(toRef(l1Block2, l1Block1.Hash), toRef(l2Block2, l2Block1.Hash))) }, func(t *testing.T, db *DB, m *stubMetrics) { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) - require.Equal(t, l1Block2, pair.DerivedFrom) + require.Equal(t, l1Block2, pair.Source) require.Equal(t, l2Block2, pair.Derived) pair, err = db.First() require.NoError(t, err) - require.Equal(t, l1Block0, pair.DerivedFrom) + require.Equal(t, l1Block0, pair.Source) require.Equal(t, l2Block0, pair.Derived) - derived, err := db.LastDerivedAt(l1Block2.ID()) + derived, err := db.SourceToLastDerived(l1Block2.ID()) require.NoError(t, err) require.Equal(t, l2Block2, derived) - _, err = db.LastDerivedAt(eth.BlockID{Hash: common.Hash{0xaa}, Number: l1Block2.Number}) + _, err = db.SourceToLastDerived(eth.BlockID{Hash: common.Hash{0xaa}, Number: l1Block2.Number}) require.ErrorIs(t, err, types.ErrConflict) - derivedFrom, err := db.DerivedFrom(l2Block2.ID()) + derivedFrom, err := db.DerivedToFirstSource(l2Block2.ID()) require.NoError(t, err) require.Equal(t, l1Block2, derivedFrom) - _, err = db.DerivedFrom(eth.BlockID{Hash: common.Hash{0xbb}, Number: l2Block2.Number}) + _, err = db.DerivedToFirstSource(eth.BlockID{Hash: common.Hash{0xbb}, Number: l2Block2.Number}) require.ErrorIs(t, err, types.ErrConflict) - derived, err = db.LastDerivedAt(l1Block1.ID()) + derived, err = db.SourceToLastDerived(l1Block1.ID()) require.NoError(t, err) require.Equal(t, l2Block1, derived) - derivedFrom, err = db.DerivedFrom(l2Block1.ID()) + derivedFrom, err = db.DerivedToFirstSource(l2Block1.ID()) require.NoError(t, err) require.Equal(t, l1Block1, derivedFrom) - derived, err = db.LastDerivedAt(l1Block0.ID()) + derived, err = db.SourceToLastDerived(l1Block0.ID()) require.NoError(t, err) require.Equal(t, l2Block0, derived) - derivedFrom, err = db.DerivedFrom(l2Block0.ID()) + derivedFrom, err = db.DerivedToFirstSource(l2Block0.ID()) require.NoError(t, err) require.Equal(t, l1Block0, 
derivedFrom) @@ -290,50 +300,56 @@ func TestThreeEntryDB(t *testing.T) { next, err := db.NextDerived(l2Block0.ID()) require.NoError(t, err) require.Equal(t, l2Block1, next.Derived) - require.Equal(t, l1Block1, next.DerivedFrom) + require.Equal(t, l1Block1, next.Source) next, err = db.NextDerived(l2Block1.ID()) require.NoError(t, err) require.Equal(t, l2Block2, next.Derived) - require.Equal(t, l1Block2, next.DerivedFrom) + require.Equal(t, l1Block2, next.Source) _, err = db.NextDerived(l2Block2.ID()) require.ErrorIs(t, err, types.ErrFuture) - derivedFrom, err = db.PreviousDerivedFrom(l1Block0.ID()) + derivedFrom, err = db.PreviousSource(l1Block0.ID()) require.NoError(t, err) require.Equal(t, types.BlockSeal{}, derivedFrom) - derivedFrom, err = db.PreviousDerivedFrom(l1Block1.ID()) + derivedFrom, err = db.PreviousSource(l1Block1.ID()) require.NoError(t, err) require.Equal(t, l1Block0, derivedFrom) - derivedFrom, err = db.PreviousDerivedFrom(l1Block2.ID()) + derivedFrom, err = db.PreviousSource(l1Block2.ID()) require.NoError(t, err) require.Equal(t, l1Block1, derivedFrom) - derivedFrom, err = db.NextDerivedFrom(l1Block0.ID()) + derivedFrom, err = db.NextSource(l1Block0.ID()) require.NoError(t, err) require.Equal(t, l1Block1, derivedFrom) - derivedFrom, err = db.NextDerivedFrom(l1Block1.ID()) + derivedFrom, err = db.NextSource(l1Block1.ID()) require.NoError(t, err) require.Equal(t, l1Block2, derivedFrom) - _, err = db.NextDerivedFrom(l1Block2.ID()) + _, err = db.NextSource(l1Block2.ID()) require.ErrorIs(t, err, types.ErrFuture) - _, err = db.FirstAfter(l1Block2.ID(), l2Block2.ID()) + _, err = db.Next(types.DerivedIDPair{ + Source: l1Block2.ID(), + Derived: l2Block2.ID()}) require.ErrorIs(t, err, types.ErrFuture) - next, err = db.FirstAfter(l1Block0.ID(), l2Block0.ID()) + next, err = db.Next(types.DerivedIDPair{ + Source: l1Block0.ID(), + Derived: l2Block0.ID()}) require.NoError(t, err) - require.Equal(t, l1Block1, next.DerivedFrom) + require.Equal(t, l1Block1, 
next.Source) require.Equal(t, l2Block1, next.Derived) - next, err = db.FirstAfter(l1Block1.ID(), l2Block1.ID()) + next, err = db.Next(types.DerivedIDPair{ + Source: l1Block1.ID(), + Derived: l2Block1.ID()}) require.NoError(t, err) - require.Equal(t, l1Block2, next.DerivedFrom) + require.Equal(t, l1Block2, next.Source) require.Equal(t, l2Block2, next.Derived) }) } @@ -360,33 +376,33 @@ func TestFastL2Batcher(t *testing.T) { require.NoError(t, db.AddDerived(l1Ref1, toRef(l2Block2, l2Block1.Hash))) require.NoError(t, db.AddDerived(l1Ref1, toRef(l2Block3, l2Block2.Hash))) require.NoError(t, db.AddDerived(l1Ref1, toRef(l2Block4, l2Block3.Hash))) - // Latest L2 block derived from later L1 block + // Last L2 block derived from later L1 block require.NoError(t, db.AddDerived(toRef(l1Block2, l1Block1.Hash), toRef(l2Block5, l2Block4.Hash))) }, func(t *testing.T, db *DB, m *stubMetrics) { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) - require.Equal(t, l1Block2, pair.DerivedFrom) + require.Equal(t, l1Block2, pair.Source) require.Equal(t, l2Block5, pair.Derived) - derived, err := db.LastDerivedAt(l1Block2.ID()) + derived, err := db.SourceToLastDerived(l1Block2.ID()) require.NoError(t, err) require.Equal(t, l2Block5, derived) // test what tip was derived from - derivedFrom, err := db.DerivedFrom(l2Block5.ID()) + derivedFrom, err := db.DerivedToFirstSource(l2Block5.ID()) require.NoError(t, err) require.Equal(t, l1Block2, derivedFrom) // Multiple L2 blocks all derived from same older L1 block for _, b := range []types.BlockSeal{l2Block1, l2Block2, l2Block3, l2Block4} { - derivedFrom, err = db.DerivedFrom(b.ID()) + derivedFrom, err = db.DerivedToFirstSource(b.ID()) require.NoError(t, err) require.Equal(t, l1Block1, derivedFrom) } - // test that the latest L2 counts, not the intermediate - derived, err = db.LastDerivedAt(l1Block1.ID()) + // test that the Last L2 counts, not the intermediate + derived, err = db.SourceToLastDerived(l1Block1.ID()) 
require.NoError(t, err) require.Equal(t, l2Block4, derived) @@ -408,46 +424,48 @@ func TestFastL2Batcher(t *testing.T) { next, err := db.NextDerived(l2Block0.ID()) require.NoError(t, err) - require.Equal(t, l1Block1, next.DerivedFrom) + require.Equal(t, l1Block1, next.Source) require.Equal(t, l2Block1, next.Derived) next, err = db.NextDerived(l2Block1.ID()) require.NoError(t, err) - require.Equal(t, l1Block1, next.DerivedFrom) + require.Equal(t, l1Block1, next.Source) require.Equal(t, l2Block2, next.Derived) next, err = db.NextDerived(l2Block2.ID()) require.NoError(t, err) - require.Equal(t, l1Block1, next.DerivedFrom) + require.Equal(t, l1Block1, next.Source) require.Equal(t, l2Block3, next.Derived) next, err = db.NextDerived(l2Block3.ID()) require.NoError(t, err) - require.Equal(t, l1Block1, next.DerivedFrom) + require.Equal(t, l1Block1, next.Source) require.Equal(t, l2Block4, next.Derived) next, err = db.NextDerived(l2Block4.ID()) require.NoError(t, err) - require.Equal(t, l1Block2, next.DerivedFrom) // derived from later L1 block + require.Equal(t, l1Block2, next.Source) // derived from later L1 block require.Equal(t, l2Block5, next.Derived) _, err = db.NextDerived(l2Block5.ID()) require.ErrorIs(t, err, types.ErrFuture) - derivedFrom, err = db.PreviousDerivedFrom(l1Block2.ID()) + derivedFrom, err = db.PreviousSource(l1Block2.ID()) require.NoError(t, err) require.Equal(t, l1Block1, derivedFrom) - derivedFrom, err = db.PreviousDerivedFrom(l1Block1.ID()) + derivedFrom, err = db.PreviousSource(l1Block1.ID()) require.NoError(t, err) require.Equal(t, l1Block0, derivedFrom) - derivedFrom, err = db.NextDerivedFrom(l1Block0.ID()) + derivedFrom, err = db.NextSource(l1Block0.ID()) require.NoError(t, err) require.Equal(t, l1Block1, derivedFrom) - derivedFrom, err = db.NextDerivedFrom(l1Block1.ID()) + derivedFrom, err = db.NextSource(l1Block1.ID()) require.NoError(t, err) require.Equal(t, l1Block2, derivedFrom) - _, err = db.NextDerivedFrom(l1Block2.ID()) + _, err = 
db.NextSource(l1Block2.ID()) require.ErrorIs(t, err, types.ErrFuture) - next, err = db.FirstAfter(l1Block1.ID(), l2Block2.ID()) + next, err = db.Next(types.DerivedIDPair{ + Source: l1Block1.ID(), + Derived: l2Block2.ID()}) require.NoError(t, err) - require.Equal(t, l1Block1, next.DerivedFrom) // no increment in L1 yet, the next after is L2 block 3 + require.Equal(t, l1Block1, next.Source) // no increment in L1 yet, the next after is L2 block 3 require.Equal(t, l2Block3, next.Derived) }) } @@ -478,25 +496,25 @@ func TestSlowL2Batcher(t *testing.T) { require.NoError(t, db.AddDerived(toRef(l1Block5, l1Block4.Hash), toRef(l2Block2, l2Block1.Hash))) }, func(t *testing.T, db *DB, m *stubMetrics) { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) - require.Equal(t, l1Block5, pair.DerivedFrom) + require.Equal(t, l1Block5, pair.Source) require.Equal(t, l2Block2, pair.Derived) // test what we last derived at the tip - derived, err := db.LastDerivedAt(l1Block5.ID()) + derived, err := db.SourceToLastDerived(l1Block5.ID()) require.NoError(t, err) require.Equal(t, l2Block2, derived) // Multiple L1 blocks all copying the last known derived L2 block for _, b := range []types.BlockSeal{l1Block1, l1Block2, l1Block3, l1Block4} { - derived, err = db.LastDerivedAt(b.ID()) + derived, err = db.SourceToLastDerived(b.ID()) require.NoError(t, err) require.Equal(t, l2Block1, derived) } // test that the first L1 counts, not the ones that repeat the L2 info - derivedFrom, err := db.DerivedFrom(l2Block1.ID()) + derivedFrom, err := db.DerivedToFirstSource(l2Block1.ID()) require.NoError(t, err) require.Equal(t, l1Block1, derivedFrom) @@ -509,49 +527,51 @@ func TestSlowL2Batcher(t *testing.T) { next, err := db.NextDerived(l2Block0.ID()) require.NoError(t, err) - require.Equal(t, l1Block1, next.DerivedFrom) + require.Equal(t, l1Block1, next.Source) require.Equal(t, l2Block1, next.Derived) next, err = db.NextDerived(l2Block1.ID()) require.NoError(t, err) - require.Equal(t, 
l1Block5, next.DerivedFrom) + require.Equal(t, l1Block5, next.Source) require.Equal(t, l2Block2, next.Derived) _, err = db.NextDerived(l2Block2.ID()) require.ErrorIs(t, err, types.ErrFuture) - derivedFrom, err = db.PreviousDerivedFrom(l1Block5.ID()) + derivedFrom, err = db.PreviousSource(l1Block5.ID()) require.NoError(t, err) require.Equal(t, l1Block4, derivedFrom) - derivedFrom, err = db.PreviousDerivedFrom(l1Block4.ID()) + derivedFrom, err = db.PreviousSource(l1Block4.ID()) require.NoError(t, err) require.Equal(t, l1Block3, derivedFrom) - derivedFrom, err = db.PreviousDerivedFrom(l1Block3.ID()) + derivedFrom, err = db.PreviousSource(l1Block3.ID()) require.NoError(t, err) require.Equal(t, l1Block2, derivedFrom) - derivedFrom, err = db.PreviousDerivedFrom(l1Block2.ID()) + derivedFrom, err = db.PreviousSource(l1Block2.ID()) require.NoError(t, err) require.Equal(t, l1Block1, derivedFrom) - derivedFrom, err = db.PreviousDerivedFrom(l1Block1.ID()) + derivedFrom, err = db.PreviousSource(l1Block1.ID()) require.NoError(t, err) require.Equal(t, l1Block0, derivedFrom) - derivedFrom, err = db.NextDerivedFrom(l1Block0.ID()) + derivedFrom, err = db.NextSource(l1Block0.ID()) require.NoError(t, err) require.Equal(t, l1Block1, derivedFrom) - derivedFrom, err = db.NextDerivedFrom(l1Block1.ID()) + derivedFrom, err = db.NextSource(l1Block1.ID()) require.NoError(t, err) require.Equal(t, l1Block2, derivedFrom) - derivedFrom, err = db.NextDerivedFrom(l1Block2.ID()) + derivedFrom, err = db.NextSource(l1Block2.ID()) require.NoError(t, err) require.Equal(t, l1Block3, derivedFrom) - derivedFrom, err = db.NextDerivedFrom(l1Block4.ID()) + derivedFrom, err = db.NextSource(l1Block4.ID()) require.NoError(t, err) require.Equal(t, l1Block5, derivedFrom) - _, err = db.NextDerivedFrom(l1Block5.ID()) + _, err = db.NextSource(l1Block5.ID()) require.ErrorIs(t, err, types.ErrFuture) - next, err = db.FirstAfter(l1Block2.ID(), l2Block1.ID()) + next, err = db.Next(types.DerivedIDPair{ + Source: 
l1Block2.ID(), + Derived: l2Block1.ID()}) require.NoError(t, err) - require.Equal(t, l1Block3, next.DerivedFrom) + require.Equal(t, l1Block3, next.Source) require.Equal(t, l2Block1, next.Derived) // no increment in L2 yet, the next after is L1 block 3 }) } @@ -573,49 +593,49 @@ func TestManyEntryDB(t *testing.T) { func testManyEntryDB(t *testing.T, offsetL1 uint64, offsetL2 uint64) { // L2 -> first L1 occurrence - firstDerivedFrom := make(map[eth.BlockID]types.BlockSeal) + firstSource := make(map[eth.BlockID]types.BlockSeal) // L1 -> last L2 occurrence lastDerived := make(map[eth.BlockID]types.BlockSeal) runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { // Insert genesis require.NoError(t, db.AddDerived(toRef(mockL1(offsetL1), common.Hash{}), toRef(mockL2(offsetL2), common.Hash{}))) - firstDerivedFrom[mockL2(offsetL2).ID()] = mockL1(offsetL1) + firstSource[mockL2(offsetL2).ID()] = mockL1(offsetL1) lastDerived[mockL1(offsetL1).ID()] = mockL2(offsetL2) rng := rand.New(rand.NewSource(1234)) // Insert 1000 randomly generated entries, derived at random bumps in L1 for i := uint64(0); i < 1000; i++ { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) switch rng.Intn(3) { case 0: // bump L1 - pair.DerivedFrom = mockL1(pair.DerivedFrom.Number + 1) + pair.Source = mockL1(pair.Source.Number + 1) case 1: // bump L2 pair.Derived = mockL2(pair.Derived.Number + 1) case 2: // bump both - pair.DerivedFrom = mockL1(pair.DerivedFrom.Number + 1) + pair.Source = mockL1(pair.Source.Number + 1) pair.Derived = mockL2(pair.Derived.Number + 1) } - derivedFromRef := toRef(pair.DerivedFrom, mockL1(pair.DerivedFrom.Number-1).Hash) + derivedFromRef := toRef(pair.Source, mockL1(pair.Source.Number-1).Hash) derivedRef := toRef(pair.Derived, mockL2(pair.Derived.Number-1).Hash) lastDerived[derivedFromRef.ID()] = pair.Derived - if _, ok := firstDerivedFrom[derivedRef.ID()]; !ok { - firstDerivedFrom[derivedRef.ID()] = pair.DerivedFrom + if _, ok := 
firstSource[derivedRef.ID()]; !ok { + firstSource[derivedRef.ID()] = pair.Source } require.NoError(t, db.AddDerived(derivedFromRef, derivedRef)) } }, func(t *testing.T, db *DB, m *stubMetrics) { // Now assert we can find what they are all derived from, and match the expectations. - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) - require.NotZero(t, pair.DerivedFrom.Number-offsetL1) + require.NotZero(t, pair.Source.Number-offsetL1) require.NotZero(t, pair.Derived.Number-offsetL2) - for i := offsetL1; i <= pair.DerivedFrom.Number; i++ { + for i := offsetL1; i <= pair.Source.Number; i++ { l1ID := mockL1(i).ID() - derived, err := db.LastDerivedAt(l1ID) + derived, err := db.SourceToLastDerived(l1ID) require.NoError(t, err) require.Contains(t, lastDerived, l1ID) require.Equal(t, lastDerived[l1ID], derived) @@ -623,25 +643,25 @@ func testManyEntryDB(t *testing.T, offsetL1 uint64, offsetL2 uint64) { for i := offsetL2; i <= pair.Derived.Number; i++ { l2ID := mockL2(i).ID() - derivedFrom, err := db.DerivedFrom(l2ID) + derivedFrom, err := db.DerivedToFirstSource(l2ID) require.NoError(t, err) - require.Contains(t, firstDerivedFrom, l2ID) - require.Equal(t, firstDerivedFrom[l2ID], derivedFrom) + require.Contains(t, firstSource, l2ID) + require.Equal(t, firstSource[l2ID], derivedFrom) } // if not started at genesis, try to read older data, assert it's unavailable. 
if offsetL1 > 0 { - _, err := db.LastDerivedAt(mockL1(0).ID()) + _, err := db.SourceToLastDerived(mockL1(0).ID()) require.ErrorIs(t, err, types.ErrSkipped) - _, err = db.LastDerivedAt(mockL1(offsetL1 - 1).ID()) + _, err = db.SourceToLastDerived(mockL1(offsetL1 - 1).ID()) require.ErrorIs(t, err, types.ErrSkipped) } if offsetL2 > 0 { - _, err := db.DerivedFrom(mockL2(0).ID()) + _, err := db.DerivedToFirstSource(mockL2(0).ID()) require.ErrorIs(t, err, types.ErrSkipped) - _, err = db.DerivedFrom(mockL2(offsetL2 - 1).ID()) + _, err = db.DerivedToFirstSource(mockL2(offsetL2 - 1).ID()) require.ErrorIs(t, err, types.ErrSkipped) } }) @@ -674,9 +694,9 @@ func TestRewindToScope(t *testing.T) { require.NoError(t, db.AddDerived(toRef(l1Block5, l1Block4.Hash), toRef(l2Block2, l2Block1.Hash))) }, func(t *testing.T, db *DB, m *stubMetrics) { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) - require.Equal(t, l1Block5, pair.DerivedFrom) + require.Equal(t, l1Block5, pair.Source) require.Equal(t, l2Block2, pair.Derived) // Rewind to the future @@ -684,32 +704,32 @@ func TestRewindToScope(t *testing.T) { // Rewind to the exact block we're at require.NoError(t, db.RewindToScope(l1Block5.ID())) - pair, err = db.Latest() + pair, err = db.Last() require.NoError(t, err) - require.Equal(t, l1Block5, pair.DerivedFrom) + require.Equal(t, l1Block5, pair.Source) require.Equal(t, l2Block2, pair.Derived) // Now rewind to L1 block 3 (inclusive). require.NoError(t, db.RewindToScope(l1Block3.ID())) // See if we find consistent data - pair, err = db.Latest() + pair, err = db.Last() require.NoError(t, err) - require.Equal(t, l1Block3, pair.DerivedFrom) + require.Equal(t, l1Block3, pair.Source) require.Equal(t, l2Block1, pair.Derived) // Rewind further to L1 block 1 (inclusive). 
require.NoError(t, db.RewindToScope(l1Block1.ID())) - pair, err = db.Latest() + pair, err = db.Last() require.NoError(t, err) - require.Equal(t, l1Block1, pair.DerivedFrom) + require.Equal(t, l1Block1, pair.Source) require.Equal(t, l2Block1, pair.Derived) // Rewind further to L1 block 0 (inclusive). require.NoError(t, db.RewindToScope(l1Block0.ID())) - pair, err = db.Latest() + pair, err = db.Last() require.NoError(t, err) - require.Equal(t, l1Block0, pair.DerivedFrom) + require.Equal(t, l1Block0, pair.Source) require.Equal(t, l2Block0, pair.Derived) }) } @@ -741,9 +761,9 @@ func TestRewindToFirstDerived(t *testing.T) { require.NoError(t, db.AddDerived(toRef(l1Block5, l1Block4.Hash), toRef(l2Block2, l2Block1.Hash))) }, func(t *testing.T, db *DB, m *stubMetrics) { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) - require.Equal(t, l1Block5, pair.DerivedFrom) + require.Equal(t, l1Block5, pair.Source) require.Equal(t, l2Block2, pair.Derived) // Rewind to the future @@ -751,25 +771,25 @@ func TestRewindToFirstDerived(t *testing.T) { // Rewind to the exact block we're at require.NoError(t, db.RewindToFirstDerived(l2Block2.ID())) - pair, err = db.Latest() + pair, err = db.Last() require.NoError(t, err) - require.Equal(t, l1Block5, pair.DerivedFrom) + require.Equal(t, l1Block5, pair.Source) require.Equal(t, l2Block2, pair.Derived) // Now rewind to L2 block 1 require.NoError(t, db.RewindToFirstDerived(l2Block1.ID())) // See if we went back to the first occurrence of L2 block 1. - pair, err = db.Latest() + pair, err = db.Last() require.NoError(t, err) - require.Equal(t, l1Block1, pair.DerivedFrom) + require.Equal(t, l1Block1, pair.Source) require.Equal(t, l2Block1, pair.Derived) // Rewind further to L2 block 0 (inclusive). 
require.NoError(t, db.RewindToFirstDerived(l2Block0.ID())) - pair, err = db.Latest() + pair, err = db.Last() require.NoError(t, err) - require.Equal(t, l1Block0, pair.DerivedFrom) + require.Equal(t, l1Block0, pair.Source) require.Equal(t, l2Block0, pair.Derived) }) } @@ -798,25 +818,25 @@ func TestInvalidateAndReplace(t *testing.T) { require.NoError(t, db.AddDerived(l1Ref2, l2Ref2)) require.NoError(t, db.AddDerived(l1Ref3, l2Ref3)) }, func(t *testing.T, db *DB, m *stubMetrics) { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) require.Equal(t, l2Ref3.ID(), pair.Derived.ID()) - require.Equal(t, l1Block1.ID(), pair.DerivedFrom.ID()) + require.Equal(t, l1Block1.ID(), pair.Source.ID()) _, err = db.Invalidated() require.ErrorIs(t, err, types.ErrConflict) invalidated := types.DerivedBlockRefPair{ - DerivedFrom: l1Ref1, - Derived: l2Ref2, + Source: l1Ref1, + Derived: l2Ref2, } require.NoError(t, db.RewindAndInvalidate(invalidated)) - _, err = db.Latest() + _, err = db.Last() require.ErrorIs(t, err, types.ErrAwaitReplacementBlock) pair, err = db.Invalidated() require.NoError(t, err) - require.Equal(t, invalidated.DerivedFrom.ID(), pair.DerivedFrom.ID()) + require.Equal(t, invalidated.Source.ID(), pair.Source.ID()) require.Equal(t, invalidated.Derived.ID(), pair.Derived.ID()) replacement := l2Ref2 @@ -825,12 +845,12 @@ func TestInvalidateAndReplace(t *testing.T) { result, err := db.ReplaceInvalidatedBlock(replacement, invalidated.Derived.Hash) require.NoError(t, err) require.Equal(t, replacement.ID(), result.Derived.ID()) - require.Equal(t, l1Block1.ID(), result.DerivedFrom.ID()) + require.Equal(t, l1Block1.ID(), result.Source.ID()) - pair, err = db.Latest() + pair, err = db.Last() require.NoError(t, err) require.Equal(t, replacement.ID(), pair.Derived.ID()) - require.Equal(t, l1Block1.ID(), pair.DerivedFrom.ID()) + require.Equal(t, l1Block1.ID(), pair.Source.ID()) }) } @@ -867,25 +887,25 @@ func TestInvalidateAndReplaceNonFirst(t *testing.T) { 
require.NoError(t, db.AddDerived(l1Ref2, l2Ref3)) // to be invalidated and replaced require.NoError(t, db.AddDerived(l1Ref2, l2Ref4)) }, func(t *testing.T, db *DB, m *stubMetrics) { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) require.Equal(t, l2Ref4.ID(), pair.Derived.ID()) - require.Equal(t, l1Block2.ID(), pair.DerivedFrom.ID()) + require.Equal(t, l1Block2.ID(), pair.Source.ID()) _, err = db.Invalidated() require.ErrorIs(t, err, types.ErrConflict) invalidated := types.DerivedBlockRefPair{ - DerivedFrom: l1Ref2, - Derived: l2Ref3, + Source: l1Ref2, + Derived: l2Ref3, } require.NoError(t, db.RewindAndInvalidate(invalidated)) - _, err = db.Latest() + _, err = db.Last() require.ErrorIs(t, err, types.ErrAwaitReplacementBlock) pair, err = db.Invalidated() require.NoError(t, err) - require.Equal(t, invalidated.DerivedFrom.ID(), pair.DerivedFrom.ID()) + require.Equal(t, invalidated.Source.ID(), pair.Source.ID()) require.Equal(t, invalidated.Derived.ID(), pair.Derived.ID()) replacement := l2Ref3 @@ -894,38 +914,42 @@ func TestInvalidateAndReplaceNonFirst(t *testing.T) { result, err := db.ReplaceInvalidatedBlock(replacement, invalidated.Derived.Hash) require.NoError(t, err) require.Equal(t, replacement.ID(), result.Derived.ID()) - require.Equal(t, l1Block2.ID(), result.DerivedFrom.ID()) + require.Equal(t, l1Block2.ID(), result.Source.ID()) - pair, err = db.Latest() + pair, err = db.Last() require.NoError(t, err) require.Equal(t, replacement.ID(), pair.Derived.ID()) - require.Equal(t, l1Block2.ID(), pair.DerivedFrom.ID()) + require.Equal(t, l1Block2.ID(), pair.Source.ID()) // The L2 block before the replacement should point to 2 prev, err := db.PreviousDerived(replacement.ID()) require.NoError(t, err) require.Equal(t, l2Ref2.ID(), prev.ID()) - lastFrom1, err := db.LastDerivedAt(l1Block1.ID()) + lastFrom1, err := db.SourceToLastDerived(l1Block1.ID()) require.NoError(t, err) // while invalidated, at this point in L1, it was still the local-safe 
block require.Equal(t, l2Ref3.ID(), lastFrom1.ID()) // This should point to the original, since we traverse based on L1 scope - entryBlock3, err := db.FirstAfter(l1Block1.ID(), l2Ref2.ID()) + entryBlock3, err := db.Next(types.DerivedIDPair{ + Source: l1Block1.ID(), + Derived: l2Ref2.ID()}) require.NoError(t, err) require.Equal(t, l2Ref3.ID(), entryBlock3.Derived.ID()) - require.Equal(t, l1Block1.ID(), entryBlock3.DerivedFrom.ID()) + require.Equal(t, l1Block1.ID(), entryBlock3.Source.ID()) // And then find the replacement, once we traverse further - entryBlockRepl, err := db.FirstAfter(l1Block1.ID(), l2Ref3.ID()) + entryBlockRepl, err := db.Next(types.DerivedIDPair{ + Source: l1Block1.ID(), + Derived: l2Ref3.ID()}) require.NoError(t, err) require.Equal(t, replacement.ID(), entryBlockRepl.Derived.ID()) - require.Equal(t, l1Block2.ID(), entryBlockRepl.DerivedFrom.ID()) + require.Equal(t, l1Block2.ID(), entryBlockRepl.Source.ID()) // Check if canonical chain is represented accurately - require.NoError(t, db.IsDerived(l2Ref2.ID()), "common block 2 is valid part of canonical chain") - require.NoError(t, db.IsDerived(replacement.ID()), "replacement is valid part of canonical chain") - require.ErrorIs(t, db.IsDerived(l2Ref3.ID()), types.ErrConflict, "invalidated block is not valid in canonical chain") + require.NoError(t, db.ContainsDerived(l2Ref2.ID()), "common block 2 is valid part of canonical chain") + require.NoError(t, db.ContainsDerived(replacement.ID()), "replacement is valid part of canonical chain") + require.ErrorIs(t, db.ContainsDerived(l2Ref3.ID()), types.ErrConflict, "invalidated block is not valid in canonical chain") }) } diff --git a/op-supervisor/supervisor/backend/db/fromda/entry.go b/op-supervisor/supervisor/backend/db/fromda/entry.go index 907d18bd729..a8365ef1993 100644 --- a/op-supervisor/supervisor/backend/db/fromda/entry.go +++ b/op-supervisor/supervisor/backend/db/fromda/entry.go @@ -19,14 +19,14 @@ func (e Entry) Type() EntryType { type EntryType 
uint8 const ( - DerivedFromV0 EntryType = 0 + SourceV0 EntryType = 0 InvalidatedFromV0 EntryType = 1 ) func (s EntryType) String() string { switch s { - case DerivedFromV0: - return "derivedFromV0" + case SourceV0: + return "sourceV0" case InvalidatedFromV0: return "invalidatedFromV0" default: @@ -50,8 +50,8 @@ func (EntryBinary) EntrySize() int { // LinkEntry is a DerivedFromV0 or a InvalidatedFromV0 kind type LinkEntry struct { - derivedFrom types.BlockSeal - derived types.BlockSeal + source types.BlockSeal + derived types.BlockSeal // when it exists as local-safe, but cannot be cross-safe. // If false: this link is a DerivedFromV0 // If true: this link is a InvalidatedFromV0 @@ -59,11 +59,11 @@ type LinkEntry struct { } func (d LinkEntry) String() string { - return fmt.Sprintf("LinkEntry(derivedFrom: %s, derived: %s, invalidated: %v)", d.derivedFrom, d.derived, d.invalidated) + return fmt.Sprintf("LinkEntry(derivedFrom: %s, derived: %s, invalidated: %v)", d.source, d.derived, d.invalidated) } func (d *LinkEntry) decode(e Entry) error { - if t := e.Type(); t != DerivedFromV0 && t != InvalidatedFromV0 { + if t := e.Type(); t != SourceV0 && t != InvalidatedFromV0 { return fmt.Errorf("%w: unexpected entry type: %s", types.ErrDataCorruption, e.Type()) } if [3]byte(e[1:4]) != ([3]byte{}) { @@ -74,15 +74,15 @@ func (d *LinkEntry) decode(e Entry) error { // l1-number(8) l1-timestamp(8) l2-number(8) l2-timestamp(8) l1-hash(32) l2-hash(32) // Note: attributes are ordered for lexical sorting to nicely match chronological sorting. 
offset := 4 - d.derivedFrom.Number = binary.BigEndian.Uint64(e[offset : offset+8]) + d.source.Number = binary.BigEndian.Uint64(e[offset : offset+8]) offset += 8 - d.derivedFrom.Timestamp = binary.BigEndian.Uint64(e[offset : offset+8]) + d.source.Timestamp = binary.BigEndian.Uint64(e[offset : offset+8]) offset += 8 d.derived.Number = binary.BigEndian.Uint64(e[offset : offset+8]) offset += 8 d.derived.Timestamp = binary.BigEndian.Uint64(e[offset : offset+8]) offset += 8 - copy(d.derivedFrom.Hash[:], e[offset:offset+32]) + copy(d.source.Hash[:], e[offset:offset+32]) offset += 32 copy(d.derived.Hash[:], e[offset:offset+32]) return nil @@ -93,18 +93,18 @@ func (d *LinkEntry) encode() Entry { if d.invalidated { out[0] = uint8(InvalidatedFromV0) } else { - out[0] = uint8(DerivedFromV0) + out[0] = uint8(SourceV0) } offset := 4 - binary.BigEndian.PutUint64(out[offset:offset+8], d.derivedFrom.Number) + binary.BigEndian.PutUint64(out[offset:offset+8], d.source.Number) offset += 8 - binary.BigEndian.PutUint64(out[offset:offset+8], d.derivedFrom.Timestamp) + binary.BigEndian.PutUint64(out[offset:offset+8], d.source.Timestamp) offset += 8 binary.BigEndian.PutUint64(out[offset:offset+8], d.derived.Number) offset += 8 binary.BigEndian.PutUint64(out[offset:offset+8], d.derived.Timestamp) offset += 8 - copy(out[offset:offset+32], d.derivedFrom.Hash[:]) + copy(out[offset:offset+32], d.source.Hash[:]) offset += 32 copy(out[offset:offset+32], d.derived.Hash[:]) return out @@ -115,7 +115,7 @@ func (d *LinkEntry) sealOrErr() (types.DerivedBlockSealPair, error) { return types.DerivedBlockSealPair{}, types.ErrAwaitReplacementBlock } return types.DerivedBlockSealPair{ - DerivedFrom: d.derivedFrom, - Derived: d.derived, + Source: d.source, + Derived: d.derived, }, nil } diff --git a/op-supervisor/supervisor/backend/db/fromda/entry_test.go b/op-supervisor/supervisor/backend/db/fromda/entry_test.go index eedc07aaece..6c39b84708b 100644 --- 
a/op-supervisor/supervisor/backend/db/fromda/entry_test.go +++ b/op-supervisor/supervisor/backend/db/fromda/entry_test.go @@ -13,7 +13,7 @@ import ( func FuzzRoundtripLinkEntry(f *testing.F) { f.Fuzz(func(t *testing.T, aHash []byte, aNum uint64, aTimestamp uint64, bHash []byte, bNum uint64, bTimestamp uint64) { x := LinkEntry{ - derivedFrom: types.BlockSeal{ + source: types.BlockSeal{ Hash: common.BytesToHash(aHash), Number: aNum, Timestamp: aTimestamp, @@ -25,7 +25,7 @@ func FuzzRoundtripLinkEntry(f *testing.F) { }, } entry := x.encode() - require.Equal(t, DerivedFromV0, entry.Type()) + require.Equal(t, SourceV0, entry.Type()) var y LinkEntry err := y.decode(entry) require.NoError(t, err) diff --git a/op-supervisor/supervisor/backend/db/fromda/update.go b/op-supervisor/supervisor/backend/db/fromda/update.go index 92afe0799eb..f5ec04bee09 100644 --- a/op-supervisor/supervisor/backend/db/fromda/update.go +++ b/op-supervisor/supervisor/backend/db/fromda/update.go @@ -42,7 +42,7 @@ func (db *DB) ReplaceInvalidatedBlock(replacementDerived eth.BlockRef, invalidat // Find the parent-block of derived-from. // We need this to build a block-ref, so the DB can be consistency-checked when the next entry is added. // There is always one, since the first entry in the DB should never be an invalidated one. 
- prevDerivedFrom, err := db.previousDerivedFrom(last.derivedFrom.ID()) + prevSource, err := db.previousSource(last.source.ID()) if err != nil { return types.DerivedBlockSealPair{}, err } @@ -52,12 +52,12 @@ func (db *DB) ReplaceInvalidatedBlock(replacementDerived eth.BlockRef, invalidat return types.DerivedBlockSealPair{}, err } replacement := types.DerivedBlockRefPair{ - DerivedFrom: last.derivedFrom.ForceWithParent(prevDerivedFrom.ID()), - Derived: replacementDerived, + Source: last.source.ForceWithParent(prevSource.ID()), + Derived: replacementDerived, } // Insert the replacement - if err := db.addLink(replacement.DerivedFrom, replacement.Derived, invalidated); err != nil { - return types.DerivedBlockSealPair{}, fmt.Errorf("failed to add %s as replacement at %s: %w", replacement.Derived, replacement.DerivedFrom, err) + if err := db.addLink(replacement.Source, replacement.Derived, invalidated); err != nil { + return types.DerivedBlockSealPair{}, fmt.Errorf("failed to add %s as replacement at %s: %w", replacement.Derived, replacement.Source, err) } return replacement.Seals(), nil } @@ -70,13 +70,13 @@ func (db *DB) RewindAndInvalidate(invalidated types.DerivedBlockRefPair) error { defer db.rwLock.Unlock() invalidatedSeals := types.DerivedBlockSealPair{ - DerivedFrom: types.BlockSealFromRef(invalidated.DerivedFrom), - Derived: types.BlockSealFromRef(invalidated.Derived), + Source: types.BlockSealFromRef(invalidated.Source), + Derived: types.BlockSealFromRef(invalidated.Derived), } if err := db.rewindLocked(invalidatedSeals, true); err != nil { return err } - if err := db.addLink(invalidated.DerivedFrom, invalidated.Derived, invalidated.Derived.Hash); err != nil { + if err := db.addLink(invalidated.Source, invalidated.Derived, invalidated.Derived.Hash); err != nil { return fmt.Errorf("failed to add invalidation entry %s: %w", invalidated, err) } return nil @@ -95,19 +95,20 @@ func (db *DB) Rewind(target types.DerivedBlockSealPair, including bool) error { // Note 
that this drop L1 blocks that resulted in a previously invalidated local-safe block. // This returns ErrFuture if the block is newer than the last known block. // This returns ErrConflict if a different block at the given height is known. +// TODO: rename this "RewindToSource" to match the idea of Source func (db *DB) RewindToScope(scope eth.BlockID) error { db.rwLock.Lock() defer db.rwLock.Unlock() - _, link, err := db.lastDerivedAt(scope.Number) + _, link, err := db.sourceNumToLastDerived(scope.Number) if err != nil { return fmt.Errorf("failed to find last derived %d: %w", scope.Number, err) } - if link.derivedFrom.ID() != scope { - return fmt.Errorf("found derived-from %s but expected %s: %w", link.derivedFrom, scope, types.ErrConflict) + if link.source.ID() != scope { + return fmt.Errorf("found derived-from %s but expected %s: %w", link.source, scope, types.ErrConflict) } return db.rewindLocked(types.DerivedBlockSealPair{ - DerivedFrom: link.derivedFrom, - Derived: link.derived, + Source: link.source, + Derived: link.derived, }, false) } @@ -116,7 +117,7 @@ func (db *DB) RewindToScope(scope eth.BlockID) error { func (db *DB) RewindToFirstDerived(v eth.BlockID) error { db.rwLock.Lock() defer db.rwLock.Unlock() - _, link, err := db.firstDerivedFrom(v.Number) + _, link, err := db.derivedNumToFirstSource(v.Number) if err != nil { return fmt.Errorf("failed to find when %d was first derived: %w", v.Number, err) } @@ -124,8 +125,8 @@ func (db *DB) RewindToFirstDerived(v eth.BlockID) error { return fmt.Errorf("found derived %s but expected %s: %w", link.derived, v, types.ErrConflict) } return db.rewindLocked(types.DerivedBlockSealPair{ - DerivedFrom: link.derivedFrom, - Derived: link.derived, + Source: link.source, + Derived: link.derived, }, false) } @@ -135,13 +136,13 @@ func (db *DB) RewindToFirstDerived(v eth.BlockID) error { // Note: This function must be called with the rwLock held. // Callers are responsible for locking and unlocking the Database. 
func (db *DB) rewindLocked(t types.DerivedBlockSealPair, including bool) error { - i, link, err := db.lookup(t.DerivedFrom.Number, t.Derived.Number) + i, link, err := db.lookup(t.Source.Number, t.Derived.Number) if err != nil { return err } - if link.derivedFrom.Hash != t.DerivedFrom.Hash { + if link.source.Hash != t.Source.Hash { return fmt.Errorf("found derived-from %s, but expected %s: %w", - link.derivedFrom, t.DerivedFrom, types.ErrConflict) + link.source, t.Source, types.ErrConflict) } if link.derived.Hash != t.Derived.Hash { return fmt.Errorf("found derived %s, but expected %s: %w", @@ -164,7 +165,7 @@ func (db *DB) rewindLocked(t types.DerivedBlockSealPair, including bool) error { // the invalidated hash needs to match it, even if a new derived block replaces it. func (db *DB) addLink(derivedFrom eth.BlockRef, derived eth.BlockRef, invalidated common.Hash) error { link := LinkEntry{ - derivedFrom: types.BlockSeal{ + source: types.BlockSeal{ Hash: derivedFrom.Hash, Number: derivedFrom.Number, Timestamp: derivedFrom.Time, @@ -196,17 +197,17 @@ func (db *DB) addLink(derivedFrom eth.BlockRef, derived eth.BlockRef, invalidate if last.invalidated { return fmt.Errorf("cannot build %s on top of invalidated entry %s: %w", link, last, types.ErrConflict) } - lastDerivedFrom := last.derivedFrom + lastSource := last.source lastDerived := last.derived - if lastDerived.ID() == derived.ID() && lastDerivedFrom.ID() == derivedFrom.ID() { + if lastDerived.ID() == derived.ID() && lastSource.ID() == derivedFrom.ID() { // it shouldn't be possible, but the ID component of a block ref doesn't include the timestamp // so if the timestampt doesn't match, still return no error to the caller, but at least log a warning if lastDerived.Timestamp != derived.Time { db.log.Warn("Derived block already exists with different timestamp", "derived", derived, "lastDerived", lastDerived) } - if lastDerivedFrom.Timestamp != derivedFrom.Time { - db.log.Warn("Derived-from block already exists with 
different timestamp", "derivedFrom", derivedFrom, "lastDerivedFrom", lastDerivedFrom) + if lastSource.Timestamp != derivedFrom.Time { + db.log.Warn("Derived-from block already exists with different timestamp", "derivedFrom", derivedFrom, "lastSource", lastSource) } // Repeat of same information. No entries to be written. // But we can silently ignore and not return an error, as that brings the caller @@ -236,7 +237,7 @@ func (db *DB) addLink(derivedFrom eth.BlockRef, derived eth.BlockRef, invalidate } else if lastDerived.Number+1 < derived.Number { return fmt.Errorf("cannot add block (%s derived from %s), last block (%s derived from %s) is too far behind: (%w)", derived, derivedFrom, - lastDerived, lastDerivedFrom, + lastDerived, lastSource, types.ErrOutOfOrder) } else { return fmt.Errorf("derived block %s is older than current derived block %s: %w", @@ -244,28 +245,28 @@ func (db *DB) addLink(derivedFrom eth.BlockRef, derived eth.BlockRef, invalidate } // Check derived-from relation: multiple L2 blocks may be derived from the same L1 block. But everything in sequence. - if lastDerivedFrom.Number == derivedFrom.Number { + if lastSource.Number == derivedFrom.Number { // Same block height? Then it must be the same block. 
- if lastDerivedFrom.Hash != derivedFrom.Hash { + if lastSource.Hash != derivedFrom.Hash { return fmt.Errorf("cannot add block %s as derived from %s, expected to be derived from %s at this block height: %w", - derived, derivedFrom, lastDerivedFrom, types.ErrConflict) + derived, derivedFrom, lastSource, types.ErrConflict) } - } else if lastDerivedFrom.Number+1 == derivedFrom.Number { + } else if lastSource.Number+1 == derivedFrom.Number { // parent hash check - if lastDerivedFrom.Hash != derivedFrom.ParentHash { + if lastSource.Hash != derivedFrom.ParentHash { return fmt.Errorf("cannot add block %s as derived from %s (parent %s) derived on top of %s: %w", - derived, derivedFrom, derivedFrom.ParentHash, lastDerivedFrom, types.ErrConflict) + derived, derivedFrom, derivedFrom.ParentHash, lastSource, types.ErrConflict) } - } else if lastDerivedFrom.Number+1 < derivedFrom.Number { + } else if lastSource.Number+1 < derivedFrom.Number { // adding block that is derived from something too far into the future return fmt.Errorf("cannot add block (%s derived from %s), last block (%s derived from %s) is too far behind: (%w)", derived, derivedFrom, - lastDerived, lastDerivedFrom, + lastDerived, lastSource, types.ErrOutOfOrder) } else { // adding block that is derived from something too old return fmt.Errorf("cannot add block %s as derived from %s, deriving already at %s: %w", - derived, derivedFrom, lastDerivedFrom, types.ErrOutOfOrder) + derived, derivedFrom, lastSource, types.ErrOutOfOrder) } e := link.encode() diff --git a/op-supervisor/supervisor/backend/db/fromda/update_test.go b/op-supervisor/supervisor/backend/db/fromda/update_test.go index 14fb1bdf3ee..d73ddbb6ef3 100644 --- a/op-supervisor/supervisor/backend/db/fromda/update_test.go +++ b/op-supervisor/supervisor/backend/db/fromda/update_test.go @@ -17,23 +17,23 @@ type testCase struct { } func TestBadUpdates(t *testing.T) { - aDerivedFrom := mockL1(1) + aSource := mockL1(1) aDerived := mockL2(201) - bDerivedFrom := 
mockL1(2) + bSource := mockL1(2) bDerived := mockL2(202) - cDerivedFrom := mockL1(3) + cSource := mockL1(3) cDerived := mockL2(203) - dDerivedFrom := mockL1(4) + dSource := mockL1(4) dDerived := mockL2(204) - eDerivedFrom := mockL1(5) + eSource := mockL1(5) eDerived := mockL2(205) - fDerivedFrom := mockL1(6) + fSource := mockL1(6) fDerived := mockL2(206) noChange := assertFn(func(t *testing.T, db *DB, m *stubMetrics) { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) - require.Equal(t, dDerivedFrom, pair.DerivedFrom) + require.Equal(t, dSource, pair.Source) require.Equal(t, dDerived, pair.Derived) }) @@ -41,14 +41,14 @@ func TestBadUpdates(t *testing.T) { { name: "add on old derivedFrom", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(bDerivedFrom, aDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder) + require.ErrorIs(t, db.AddDerived(toRef(bSource, aSource.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder) }, assertFn: noChange, }, { name: "repeat parent derivedFrom", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(cDerivedFrom, bDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder) + require.ErrorIs(t, db.AddDerived(toRef(cSource, bSource.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder) }, assertFn: noChange, }, @@ -57,56 +57,56 @@ func TestBadUpdates(t *testing.T) { setupFn: func(t *testing.T, db *DB, m *stubMetrics) { require.ErrorIs(t, db.AddDerived(toRef(types.BlockSeal{ Hash: common.Hash{0xba, 0xd}, - Number: dDerivedFrom.Number, - Timestamp: dDerivedFrom.Timestamp, - }, cDerivedFrom.Hash), toRef(eDerived, dDerived.Hash)), types.ErrConflict) + Number: dSource.Number, + Timestamp: dSource.Timestamp, + }, cSource.Hash), toRef(eDerived, dDerived.Hash)), types.ErrConflict) }, assertFn: noChange, }, { - name: "CrossDerivedFrom with conflicting parent root, same L1 height, new L2: 
accepted, L1 parent-hash is used only on L1 increments.", + name: "CrossSource with conflicting parent root, same L1 height, new L2: accepted, L1 parent-hash is used only on L1 increments.", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.NoError(t, db.AddDerived(toRef(dDerivedFrom, common.Hash{0x42}), toRef(eDerived, dDerived.Hash)), types.ErrConflict) + require.NoError(t, db.AddDerived(toRef(dSource, common.Hash{0x42}), toRef(eDerived, dDerived.Hash)), types.ErrConflict) }, assertFn: func(t *testing.T, db *DB, m *stubMetrics) { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) - require.Equal(t, dDerivedFrom, pair.DerivedFrom) + require.Equal(t, dSource, pair.Source) require.Equal(t, eDerived, pair.Derived) }, }, { name: "Conflicting derivedFrom parent root, new L1 height, same L2", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(eDerivedFrom, common.Hash{0x42}), toRef(dDerived, cDerived.Hash)), types.ErrConflict) + require.ErrorIs(t, db.AddDerived(toRef(eSource, common.Hash{0x42}), toRef(dDerived, cDerived.Hash)), types.ErrConflict) }, assertFn: noChange, }, { name: "add on too new derivedFrom (even if parent-hash looks correct)", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(fDerivedFrom, dDerivedFrom.Hash), toRef(eDerived, dDerived.Hash)), types.ErrOutOfOrder) + require.ErrorIs(t, db.AddDerived(toRef(fSource, dSource.Hash), toRef(eDerived, dDerived.Hash)), types.ErrOutOfOrder) }, assertFn: noChange, }, { name: "add on old derivedFrom (even if parent-hash looks correct)", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(cDerivedFrom, bDerivedFrom.Hash), toRef(cDerived, dDerived.Hash)), types.ErrOutOfOrder) + require.ErrorIs(t, db.AddDerived(toRef(cSource, bSource.Hash), toRef(cDerived, dDerived.Hash)), types.ErrOutOfOrder) }, assertFn: noChange, }, { name: "add on even older 
derivedFrom", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(bDerivedFrom, aDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder) + require.ErrorIs(t, db.AddDerived(toRef(bSource, aSource.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder) }, assertFn: noChange, }, { name: "add on conflicting derived, same L2 height, new L1 block", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(eDerivedFrom, dDerivedFrom.Hash), toRef(types.BlockSeal{ + require.ErrorIs(t, db.AddDerived(toRef(eSource, dSource.Hash), toRef(types.BlockSeal{ Hash: common.Hash{0x42}, Number: dDerived.Number, Timestamp: dDerived.Timestamp, @@ -117,40 +117,40 @@ func TestBadUpdates(t *testing.T) { { name: "add derived with conflicting parent hash, new L1 height, same L2 height: accepted, L2 parent-hash is only checked on L2 increments.", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.NoError(t, db.AddDerived(toRef(eDerivedFrom, dDerivedFrom.Hash), toRef(dDerived, common.Hash{0x42})), types.ErrConflict) + require.NoError(t, db.AddDerived(toRef(eSource, dSource.Hash), toRef(dDerived, common.Hash{0x42})), types.ErrConflict) }, assertFn: func(t *testing.T, db *DB, m *stubMetrics) { - pair, err := db.Latest() + pair, err := db.Last() require.NoError(t, err) - require.Equal(t, eDerivedFrom, pair.DerivedFrom) + require.Equal(t, eSource, pair.Source) require.Equal(t, dDerived, pair.Derived) }, }, { name: "add derived with conflicting parent hash, same L1 height, new L2 height", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(eDerived, common.Hash{0x42})), types.ErrConflict) + require.ErrorIs(t, db.AddDerived(toRef(dSource, cSource.Hash), toRef(eDerived, common.Hash{0x42})), types.ErrConflict) }, assertFn: noChange, }, { name: "add on too new derived (even if parent-hash looks correct)", 
setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(fDerived, dDerived.Hash)), types.ErrOutOfOrder) + require.ErrorIs(t, db.AddDerived(toRef(dSource, cSource.Hash), toRef(fDerived, dDerived.Hash)), types.ErrOutOfOrder) }, assertFn: noChange, }, { name: "add on old derived (even if parent-hash looks correct)", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(cDerived, bDerived.Hash)), types.ErrOutOfOrder) + require.ErrorIs(t, db.AddDerived(toRef(dSource, cSource.Hash), toRef(cDerived, bDerived.Hash)), types.ErrOutOfOrder) }, assertFn: noChange, }, { name: "add on even older derived", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { - require.ErrorIs(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(bDerived, aDerived.Hash)), types.ErrOutOfOrder) + require.ErrorIs(t, db.AddDerived(toRef(dSource, cSource.Hash), toRef(bDerived, aDerived.Hash)), types.ErrOutOfOrder) }, assertFn: noChange, }, @@ -158,7 +158,7 @@ func TestBadUpdates(t *testing.T) { name: "repeat self, silent no-op", setupFn: func(t *testing.T, db *DB, m *stubMetrics) { pre := m.DBDerivedEntryCount - require.NoError(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder) + require.NoError(t, db.AddDerived(toRef(dSource, cSource.Hash), toRef(dDerived, cDerived.Hash)), types.ErrOutOfOrder) require.Equal(t, pre, m.DBDerivedEntryCount) }, assertFn: noChange, @@ -170,7 +170,7 @@ func TestBadUpdates(t *testing.T) { runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { // Good first entry - require.NoError(t, db.AddDerived(toRef(dDerivedFrom, cDerivedFrom.Hash), toRef(dDerived, cDerived.Hash))) + require.NoError(t, db.AddDerived(toRef(dSource, cSource.Hash), toRef(dDerived, cDerived.Hash))) // apply the test-case setup tc.setupFn(t, db, m) }, diff --git 
a/op-supervisor/supervisor/backend/db/logs/db.go b/op-supervisor/supervisor/backend/db/logs/db.go index bb87d5abf9d..7af29fc132a 100644 --- a/op-supervisor/supervisor/backend/db/logs/db.go +++ b/op-supervisor/supervisor/backend/db/logs/db.go @@ -282,13 +282,20 @@ func (db *DB) Get(blockNum uint64, logIdx uint32) (common.Hash, error) { // This can be used to check the validity of cross-chain interop events. // The block-seal of the blockNum block, that the log was included in, is returned. // This seal may be fully zeroed, without error, if the block isn't fully known yet. -func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (types.BlockSeal, error) { +func (db *DB) Contains(query types.ContainsQuery) (types.BlockSeal, error) { + blockNum, logIdx, logHash, timestamp := query.BlockNum, query.LogIdx, query.LogHash, query.Timestamp db.rwLock.RLock() defer db.rwLock.RUnlock() db.log.Trace("Checking for log", "blockNum", blockNum, "logIdx", logIdx, "hash", logHash) // Hot-path: check if we have the block if db.lastEntryContext.hasCompleteBlock() && db.lastEntryContext.blockNum < blockNum { + // it is possible that while the included Block Number is beyond the end of the database, + // the included timestamp is within the database. In this case we know the request is not just a ErrFuture, + // but a ErrConflict, as we know the request will not be included in the future. + if db.lastEntryContext.timestamp > timestamp { + return types.BlockSeal{}, types.ErrConflict + } return types.BlockSeal{}, types.ErrFuture } @@ -318,16 +325,22 @@ func (db *DB) Contains(blockNum uint64, logIdx uint32, logHash common.Hash) (typ if err == nil { panic("expected iterator to stop with error") } + // ErrStop indicates we've found the block, and the iterator is positioned at it. 
if errors.Is(err, types.ErrStop) { h, n, ok := iter.SealedBlock() if !ok { return types.BlockSeal{}, fmt.Errorf("iterator stopped but no sealed block found") } - timestamp, _ := iter.SealedTimestamp() + t, _ := iter.SealedTimestamp() + // check the timestamp invariant on the result + if t != timestamp { + return types.BlockSeal{}, fmt.Errorf("timestamp mismatch: expected %d, got %d %w", timestamp, t, types.ErrConflict) + } + // construct a block seal with the found data now that we know it's correct return types.BlockSeal{ Hash: h, Number: n, - Timestamp: timestamp, + Timestamp: t, }, nil } return types.BlockSeal{}, err diff --git a/op-supervisor/supervisor/backend/db/logs/db_test.go b/op-supervisor/supervisor/backend/db/logs/db_test.go index bbd4a0c76d3..29968042110 100644 --- a/op-supervisor/supervisor/backend/db/logs/db_test.go +++ b/op-supervisor/supervisor/backend/db/logs/db_test.go @@ -82,8 +82,8 @@ func TestEmptyDbDoesNotFindEntry(t *testing.T) { runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) {}, func(t *testing.T, db *DB, m *stubMetrics) { - requireFuture(t, db, 1, 0, createHash(1)) - requireFuture(t, db, 1, 0, common.Hash{}) + requireFuture(t, db, 1, 0, 1, createHash(1)) + requireFuture(t, db, 1, 0, 1, common.Hash{}) }) } @@ -232,13 +232,13 @@ func TestAddLog(t *testing.T) { runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { genesis := eth.BlockID{Hash: createHash(15), Number: 15} - require.NoError(t, db.SealBlock(common.Hash{}, genesis, 5000), "seal genesis") + require.NoError(t, db.SealBlock(common.Hash{}, genesis, 5001), "seal genesis") err := db.AddLog(createHash(1), genesis, 0, nil) require.NoError(t, err, "first log after genesis") require.NoError(t, db.SealBlock(genesis.Hash, eth.BlockID{Hash: createHash(16), Number: 16}, 5001)) }, func(t *testing.T, db *DB, m *stubMetrics) { - requireContains(t, db, 16, 0, createHash(1)) + requireContains(t, db, 16, 0, 5001, createHash(1)) ref, logCount, execMsgs, err := db.OpenBlock(16) 
require.NoError(t, err) @@ -271,9 +271,9 @@ func TestAddLog(t *testing.T) { }, func(t *testing.T, db *DB, m *stubMetrics) { require.EqualValues(t, 16*2+3+2, m.entryCount, "empty blocks have logs") - requireContains(t, db, 16, 0, createHash(1)) - requireContains(t, db, 16, 1, createHash(2)) - requireContains(t, db, 16, 2, createHash(3)) + requireContains(t, db, 16, 0, 5016, createHash(1)) + requireContains(t, db, 16, 1, 5016, createHash(2)) + requireContains(t, db, 16, 2, 5016, createHash(3)) ref, logCount, execMsgs, err := db.OpenBlock(13) require.NoError(t, err) @@ -290,35 +290,36 @@ func TestAddLog(t *testing.T) { }) t.Run("MultipleEntriesFromMultipleBlocks", func(t *testing.T) { + t14, t15, t16, t17 := uint64(5000), uint64(5001), uint64(5003), uint64(5003) runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { bl14 := eth.BlockID{Hash: createHash(14), Number: 14} - err := db.SealBlock(createHash(13), bl14, 5000) + err := db.SealBlock(createHash(13), bl14, t14) require.NoError(t, err) bl15 := eth.BlockID{Hash: createHash(15), Number: 15} - err = db.SealBlock(createHash(14), bl15, 5001) + err = db.SealBlock(createHash(14), bl15, t15) require.NoError(t, err) err = db.AddLog(createHash(1), bl15, 0, nil) require.NoError(t, err) err = db.AddLog(createHash(2), bl15, 1, nil) require.NoError(t, err) bl16 := eth.BlockID{Hash: createHash(16), Number: 16} - err = db.SealBlock(bl15.Hash, bl16, 5003) + err = db.SealBlock(bl15.Hash, bl16, t16) require.NoError(t, err) err = db.AddLog(createHash(3), bl16, 0, nil) require.NoError(t, err) err = db.AddLog(createHash(4), bl16, 1, nil) require.NoError(t, err) bl17 := eth.BlockID{Hash: createHash(17), Number: 17} - err = db.SealBlock(bl16.Hash, bl17, 5003) + err = db.SealBlock(bl16.Hash, bl17, t17) require.NoError(t, err) }, func(t *testing.T, db *DB, m *stubMetrics) { require.EqualValues(t, 2+2+1+1+2+1+1+2, m.entryCount, "should not output new searchCheckpoint for every block") - requireContains(t, db, 16, 0, createHash(1)) - 
requireContains(t, db, 16, 1, createHash(2)) - requireContains(t, db, 17, 0, createHash(3)) - requireContains(t, db, 17, 1, createHash(4)) + requireContains(t, db, 16, 0, t16, createHash(1)) + requireContains(t, db, 16, 1, t16, createHash(2)) + requireContains(t, db, 17, 0, t17, createHash(3)) + requireContains(t, db, 17, 1, t17, createHash(4)) }) }) @@ -489,10 +490,11 @@ func TestAddLog(t *testing.T) { // Block 3 logs extend to immediately before the fourth search-checkpoint block3LogCount := searchCheckpointFrequency - 19 block4LogCount := 2 + t0, t1, t2, t3, t4 := uint64(3000), uint64(3001), uint64(3002), uint64(3003), uint64(3003) runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { // force in block 0 - require.NoError(t, db.lastEntryContext.forceBlock(block0, 3000)) + require.NoError(t, db.lastEntryContext.forceBlock(block0, t0)) expectedIndex := entrydb.EntryIdx(2) t.Logf("block 0 complete, at entry %d", db.lastEntryContext.NextIndex()) require.Equal(t, expectedIndex, db.lastEntryContext.NextIndex()) @@ -501,7 +503,7 @@ func TestAddLog(t *testing.T) { err := db.AddLog(createHash(i), block0, uint32(i), nil) require.NoError(t, err) } - err := db.SealBlock(block0.Hash, block1, 3001) // second seal-checkpoint + err := db.SealBlock(block0.Hash, block1, t1) // second seal-checkpoint require.NoError(t, err) } expectedIndex += entrydb.EntryIdx(block1LogCount) + 2 @@ -513,7 +515,7 @@ func TestAddLog(t *testing.T) { err := db.AddLog(createHash(i), block1, uint32(i), nil) require.NoError(t, err) } - err := db.SealBlock(block1.Hash, block2, 3002) // third seal-checkpoint + err := db.SealBlock(block1.Hash, block2, t2) // third seal-checkpoint require.NoError(t, err) } expectedIndex += entrydb.EntryIdx(block2LogCount) + 2 + 2 + 2 @@ -524,7 +526,7 @@ func TestAddLog(t *testing.T) { err := db.AddLog(createHash(i), block2, uint32(i), nil) require.NoError(t, err) } - err := db.SealBlock(block2.Hash, block3, 3003) + err := db.SealBlock(block2.Hash, block3, t3) 
require.NoError(t, err) } expectedIndex += entrydb.EntryIdx(block3LogCount) + 2 @@ -542,7 +544,7 @@ func TestAddLog(t *testing.T) { err := db.AddLog(createHash(i), block3, uint32(i), nil) require.NoError(t, err) } - err := db.SealBlock(block3.Hash, block4, 3003) // fourth seal checkpoint + err := db.SealBlock(block3.Hash, block4, t4) // fourth seal checkpoint require.NoError(t, err) } expectedIndex += entrydb.EntryIdx(block4LogCount) + 2 + 2 @@ -556,19 +558,19 @@ func TestAddLog(t *testing.T) { require.EqualValues(t, expectedEntryCount, m.entryCount) // Check we can find all the logs. for i := 0; i < block1LogCount; i++ { - requireContains(t, db, block1.Number, uint32(i), createHash(i)) + requireContains(t, db, block1.Number, uint32(i), t1, createHash(i)) } // Block 2 logs extend to just after the third checkpoint for i := 0; i < block2LogCount; i++ { - requireContains(t, db, block2.Number, uint32(i), createHash(i)) + requireContains(t, db, block2.Number, uint32(i), t2, createHash(i)) } // Block 3 logs extend to immediately before the fourth checkpoint for i := 0; i < block3LogCount; i++ { - requireContains(t, db, block3.Number, uint32(i), createHash(i)) + requireContains(t, db, block3.Number, uint32(i), t3, createHash(i)) } // Block 4 logs start immediately after the fourth checkpoint for i := 0; i < block4LogCount; i++ { - requireContains(t, db, block4.Number, uint32(i), createHash(i)) + requireContains(t, db, block4.Number, uint32(i), t4, createHash(i)) } }) }) @@ -593,7 +595,7 @@ func TestAddDependentLog(t *testing.T) { require.NoError(t, db.SealBlock(bl15.Hash, bl16, 5002)) }, func(t *testing.T, db *DB, m *stubMetrics) { - requireContains(t, db, 16, 0, createHash(1), execMsg) + requireContains(t, db, 16, 0, 5002, createHash(1), execMsg) }) }) @@ -615,8 +617,8 @@ func TestAddDependentLog(t *testing.T) { require.NoError(t, db.SealBlock(bl16.Hash, bl17, 5002)) }, func(t *testing.T, db *DB, m *stubMetrics) { - requireContains(t, db, 16, 0, createHash(9)) - 
requireContains(t, db, 17, 0, createHash(1), execMsg) + requireContains(t, db, 16, 0, 5001, createHash(9)) + requireContains(t, db, 17, 0, 5002, createHash(1), execMsg) }) }) @@ -647,8 +649,8 @@ func TestAddDependentLog(t *testing.T) { require.NoError(t, db.SealBlock(bl15.Hash, bl16, 5001)) }, func(t *testing.T, db *DB, m *stubMetrics) { - requireContains(t, db, 16, 251, createHash(9)) - requireContains(t, db, 16, 252, createHash(1), execMsg) + requireContains(t, db, 16, 251, 5001, createHash(9)) + requireContains(t, db, 16, 252, 5001, createHash(1), execMsg) }) }) @@ -678,45 +680,57 @@ func TestAddDependentLog(t *testing.T) { require.NoError(t, db.SealBlock(bl15.Hash, bl16, 5001)) }, func(t *testing.T, db *DB, m *stubMetrics) { - requireContains(t, db, 16, 252, createHash(9)) - requireContains(t, db, 16, 253, createHash(1), execMsg) + requireContains(t, db, 16, 252, 5001, createHash(9)) + requireContains(t, db, 16, 253, 5001, createHash(1), execMsg) }) }) } func TestContains(t *testing.T) { + // t53 and t54 are not not expected to be in the database because those blocks are never Sealed + t50, t51, t52, t53, t54 := uint64(5000), uint64(5001), uint64(5001), uint64(5003), uint64(5004) runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} - require.NoError(t, db.lastEntryContext.forceBlock(bl50, 5000)) + require.NoError(t, db.lastEntryContext.forceBlock(bl50, t50)) require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) require.NoError(t, db.AddLog(createHash(3), bl50, 1, nil)) require.NoError(t, db.AddLog(createHash(2), bl50, 2, nil)) bl51 := eth.BlockID{Hash: createHash(51), Number: 51} - require.NoError(t, db.SealBlock(bl50.Hash, bl51, 5001)) + require.NoError(t, db.SealBlock(bl50.Hash, bl51, t51)) bl52 := eth.BlockID{Hash: createHash(52), Number: 52} - require.NoError(t, db.SealBlock(bl51.Hash, bl52, 5001)) + require.NoError(t, db.SealBlock(bl51.Hash, bl52, t52)) require.NoError(t, 
db.AddLog(createHash(1), bl52, 0, nil)) require.NoError(t, db.AddLog(createHash(3), bl52, 1, nil)) }, func(t *testing.T, db *DB, m *stubMetrics) { // Should find added logs - requireContains(t, db, 51, 0, createHash(1)) - requireContains(t, db, 51, 1, createHash(3)) - requireContains(t, db, 51, 2, createHash(2)) - requireFuture(t, db, 53, 0, createHash(1)) - requireFuture(t, db, 53, 1, createHash(3)) + requireContains(t, db, 51, 0, t51, createHash(1)) + requireContains(t, db, 51, 1, t51, createHash(3)) + requireContains(t, db, 51, 2, t51, createHash(2)) + requireFuture(t, db, 53, 0, t53, createHash(1)) + requireFuture(t, db, 53, 1, t53, createHash(3)) + // When the block is in the future but the timestamp is within the database, + // ErrConflict is returned, because the timestamp invariant is broken. + requireConflicts(t, db, 53, 1, t50, createHash(3)) + // However, when the timestamp is equal to the last timestamp in the database, + // ErrFuture is used because the timestamp may be equal between blocks. 
+ requireFuture(t, db, 53, 1, t52, createHash(3)) // 52 was sealed as empty - requireConflicts(t, db, 52, 0, createHash(1)) + requireConflicts(t, db, 52, 0, t52, createHash(1)) // 53 only contained 2 logs, not 3, and is not sealed yet - requireFuture(t, db, 53, 2, createHash(3)) + requireFuture(t, db, 53, 2, t53, createHash(3)) // 54 doesn't exist yet - requireFuture(t, db, 54, 0, createHash(3)) + requireFuture(t, db, 54, 0, t54, createHash(3)) // 51 only contained 3 logs, not 4 - requireConflicts(t, db, 51, 3, createHash(2)) + requireConflicts(t, db, 51, 3, t51, createHash(2)) + + // when the timestamp invariant is broken, ErrConflict is returned + requireConflicts(t, db, 51, 2, 4000, createHash(2)) // 4000 != 5001 + }) } @@ -742,17 +756,18 @@ func TestExecutes(t *testing.T) { Timestamp: 6578567, Hash: createHash(778889), } + t50, t51, t52, t53, t54 := uint64(500), uint64(5001), uint64(5002), uint64(5003), uint64(5004) runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} - require.NoError(t, db.lastEntryContext.forceBlock(bl50, 500)) + require.NoError(t, db.lastEntryContext.forceBlock(bl50, t50)) require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) require.NoError(t, db.AddLog(createHash(3), bl50, 1, &execMsg1)) require.NoError(t, db.AddLog(createHash(2), bl50, 2, nil)) bl51 := eth.BlockID{Hash: createHash(51), Number: 51} - require.NoError(t, db.SealBlock(bl50.Hash, bl51, 5001)) + require.NoError(t, db.SealBlock(bl50.Hash, bl51, t51)) bl52 := eth.BlockID{Hash: createHash(52), Number: 52} - require.NoError(t, db.SealBlock(bl51.Hash, bl52, 5001)) + require.NoError(t, db.SealBlock(bl51.Hash, bl52, t52)) require.NoError(t, db.AddLog(createHash(1), bl52, 0, &execMsg2)) require.NoError(t, db.AddLog(createHash(3), bl52, 1, &execMsg3)) }, @@ -765,15 +780,15 @@ func TestExecutes(t *testing.T) { requireExecutingMessage(t, db, 53, 1, execMsg3) // 52 was sealed without logs - requireConflicts(t, db, 52, 0, 
createHash(1)) + requireConflicts(t, db, 52, 0, t52, createHash(1)) // 53 only contained 2 logs, not 3, and is not sealed yet - requireFuture(t, db, 53, 2, createHash(3)) + requireFuture(t, db, 53, 2, t53, createHash(3)) // 54 doesn't exist yet - requireFuture(t, db, 54, 0, createHash(3)) + requireFuture(t, db, 54, 0, t54, createHash(3)) // 51 only contained 3 logs, not 4 - requireConflicts(t, db, 51, 3, createHash(2)) + requireConflicts(t, db, 51, 3, t51, createHash(2)) // 51 contains an executing message, and 2 other non-executing logs ref, logCount, execMsgs, err := db.OpenBlock(51) @@ -833,11 +848,16 @@ func TestGetBlockInfo(t *testing.T) { }) } -func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash, execMsg ...types.ExecutingMessage) { +func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, timestamp uint64, logHash common.Hash, execMsg ...types.ExecutingMessage) { require.LessOrEqual(t, len(execMsg), 1, "cannot have multiple executing messages for a single log") m, ok := db.m.(*stubMetrics) require.True(t, ok, "Did not get the expected metrics type") - _, err := db.Contains(blockNum, logIdx, logHash) + _, err := db.Contains(types.ContainsQuery{ + Timestamp: timestamp, + BlockNum: blockNum, + LogIdx: logIdx, + LogHash: logHash, + }) require.NoErrorf(t, err, "Error searching for log %v in block %v", logIdx, blockNum) require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency*2), "Should not need to read more than between two checkpoints") require.NotZero(t, m.entriesReadForSearch, "Must read at least some entries to find the log") @@ -849,20 +869,28 @@ func requireContains(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHa requireExecutingMessage(t, db, blockNum, logIdx, expectedExecMsg) } -func requireConflicts(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) { +func requireConflicts(t *testing.T, db *DB, blockNum uint64, logIdx uint32, timestamp 
uint64, logHash common.Hash) { m, ok := db.m.(*stubMetrics) require.True(t, ok, "Did not get the expected metrics type") - _, err := db.Contains(blockNum, logIdx, logHash) - require.ErrorIs(t, err, types.ErrConflict, "canonical chain must not include this log") + _, err := db.Contains(types.ContainsQuery{ + Timestamp: timestamp, + BlockNum: blockNum, + LogIdx: logIdx, + LogHash: logHash, + }) require.ErrorIs(t, err, types.ErrConflict, "canonical chain must not include this log") require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency*2), "Should not need to read more than between two checkpoints") } -func requireFuture(t *testing.T, db *DB, blockNum uint64, logIdx uint32, logHash common.Hash) { +func requireFuture(t *testing.T, db *DB, blockNum uint64, logIdx uint32, timestamp uint64, logHash common.Hash) { m, ok := db.m.(*stubMetrics) require.True(t, ok, "Did not get the expected metrics type") - _, err := db.Contains(blockNum, logIdx, logHash) - require.ErrorIs(t, err, types.ErrFuture, "canonical chain does not yet include this log") + _, err := db.Contains(types.ContainsQuery{ + Timestamp: timestamp, + BlockNum: blockNum, + LogIdx: logIdx, + LogHash: logHash, + }) require.ErrorIs(t, err, types.ErrFuture, "canonical chain does not yet include this log") require.LessOrEqual(t, m.entriesReadForSearch, int64(searchCheckpointFrequency*2), "Should not need to read more than between two checkpoints") } @@ -915,7 +943,7 @@ func TestRecoverOnCreate(t *testing.T) { db, m, err := createDb(t, store) require.NoError(t, err) require.EqualValues(t, int64(4*2+3), m.entryCount) - requireContains(t, db, 4, 0, createHash(1)) + requireContains(t, db, 4, 0, 104, createHash(1)) }) t.Run("NoTruncateWhenLastEntryIsExecutingCheckSealed", func(t *testing.T) { @@ -944,7 +972,7 @@ func TestRecoverOnCreate(t *testing.T) { db, m, err := createDb(t, store) require.NoError(t, err) require.EqualValues(t, int64(3*2+5), m.entryCount) - requireContains(t, db, 3, 0, 
createHash(1111), execMsg) + requireContains(t, db, 3, 0, 103, createHash(1111), execMsg) }) t.Run("TruncateWhenLastEntrySearchCheckpoint", func(t *testing.T) { @@ -1033,36 +1061,38 @@ func TestRewind(t *testing.T) { }) t.Run("AfterLastBlock", func(t *testing.T) { + t50, t51, t52, t53 := uint64(500), uint64(502), uint64(504), uint64(506) runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} - require.NoError(t, db.SealBlock(createHash(49), bl50, 500)) + require.NoError(t, db.SealBlock(createHash(49), bl50, t50)) require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) require.NoError(t, db.AddLog(createHash(2), bl50, 1, nil)) bl51 := eth.BlockID{Hash: createHash(51), Number: 51} - require.NoError(t, db.SealBlock(bl50.Hash, bl51, 502)) + require.NoError(t, db.SealBlock(bl50.Hash, bl51, t51)) require.NoError(t, db.AddLog(createHash(3), bl51, 0, nil)) bl52 := eth.BlockID{Hash: createHash(52), Number: 52} - require.NoError(t, db.SealBlock(bl51.Hash, bl52, 504)) + require.NoError(t, db.SealBlock(bl51.Hash, bl52, t52)) require.NoError(t, db.AddLog(createHash(4), bl52, 0, nil)) // cannot rewind to a block that is not sealed yet require.ErrorIs(t, db.Rewind(createID(53)), types.ErrFuture) require.ErrorIs(t, db.Rewind(createID(53)), types.ErrFuture) }, func(t *testing.T, db *DB, m *stubMetrics) { - requireContains(t, db, 51, 0, createHash(1)) - requireContains(t, db, 51, 1, createHash(2)) - requireContains(t, db, 52, 0, createHash(3)) + requireContains(t, db, 51, 0, t51, createHash(1)) + requireContains(t, db, 51, 1, t51, createHash(2)) + requireContains(t, db, 52, 0, t52, createHash(3)) // Still have the pending log of unsealed block if the rewind to unknown sealed block fails - requireFuture(t, db, 53, 0, createHash(4)) + requireFuture(t, db, 53, 0, t53, createHash(4)) }) }) t.Run("BeforeFirstBlock", func(t *testing.T) { + t50, t51 := uint64(500), uint64(501) runDBTest(t, func(t *testing.T, db *DB, m 
*stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} - require.NoError(t, db.SealBlock(createHash(49), bl50, 500)) + require.NoError(t, db.SealBlock(createHash(49), bl50, t50)) require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) require.NoError(t, db.AddLog(createHash(2), bl50, 1, nil)) // cannot go back to an unknown block @@ -1071,39 +1101,41 @@ func TestRewind(t *testing.T) { }, func(t *testing.T, db *DB, m *stubMetrics) { // block 51 is not sealed yet - requireFuture(t, db, 51, 0, createHash(1)) - requireFuture(t, db, 51, 0, createHash(1)) + requireFuture(t, db, 51, 0, t51, createHash(1)) + requireFuture(t, db, 51, 0, t51, createHash(1)) }) }) t.Run("AtFirstBlock", func(t *testing.T) { + t50, t51, t52 := uint64(500), uint64(502), uint64(504) runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} - require.NoError(t, db.SealBlock(createHash(49), bl50, 500)) + require.NoError(t, db.SealBlock(createHash(49), bl50, t50)) require.NoError(t, db.AddLog(createHash(1), bl50, 0, nil)) require.NoError(t, db.AddLog(createHash(2), bl50, 1, nil)) bl51 := eth.BlockID{Hash: createHash(51), Number: 51} - require.NoError(t, db.SealBlock(bl50.Hash, bl51, 502)) + require.NoError(t, db.SealBlock(bl50.Hash, bl51, t51)) require.NoError(t, db.AddLog(createHash(1), bl51, 0, nil)) require.NoError(t, db.AddLog(createHash(2), bl51, 1, nil)) bl52 := eth.BlockID{Hash: createHash(52), Number: 52} - require.NoError(t, db.SealBlock(bl51.Hash, bl52, 504)) + require.NoError(t, db.SealBlock(bl51.Hash, bl52, t52)) require.NoError(t, db.Rewind(createID(51))) }, func(t *testing.T, db *DB, m *stubMetrics) { - requireContains(t, db, 51, 0, createHash(1)) - requireContains(t, db, 51, 1, createHash(2)) - requireFuture(t, db, 52, 0, createHash(1)) - requireFuture(t, db, 52, 1, createHash(2)) + requireContains(t, db, 51, 0, t51, createHash(1)) + requireContains(t, db, 51, 1, t51, createHash(2)) + requireFuture(t, db, 52, 0, 
t52, createHash(1)) + requireFuture(t, db, 52, 1, t52, createHash(2)) }) }) t.Run("AfterSecondCheckpoint", func(t *testing.T) { + t50, t51, t52 := uint64(500), uint64(502), uint64(504) runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { bl50 := eth.BlockID{Hash: createHash(50), Number: 50} - require.NoError(t, db.SealBlock(createHash(49), bl50, 500)) + require.NoError(t, db.SealBlock(createHash(49), bl50, t50)) for i := uint32(0); m.entryCount < searchCheckpointFrequency; i++ { require.NoError(t, db.AddLog(createHash(1), bl50, i, nil)) } @@ -1112,30 +1144,35 @@ func TestRewind(t *testing.T) { // Thus add 2 for the checkpoint. require.EqualValues(t, searchCheckpointFrequency+2, m.entryCount) bl51 := eth.BlockID{Hash: createHash(51), Number: 51} - require.NoError(t, db.SealBlock(bl50.Hash, bl51, 502)) + require.NoError(t, db.SealBlock(bl50.Hash, bl51, t51)) require.NoError(t, db.AddLog(createHash(1), bl51, 0, nil)) require.EqualValues(t, searchCheckpointFrequency+2+3, m.entryCount, "Should have inserted new checkpoint and extra log") require.NoError(t, db.AddLog(createHash(2), bl51, 1, nil)) bl52 := eth.BlockID{Hash: createHash(52), Number: 52} - require.NoError(t, db.SealBlock(bl51.Hash, bl52, 504)) + require.NoError(t, db.SealBlock(bl51.Hash, bl52, t52)) require.NoError(t, db.Rewind(createID(51))) }, func(t *testing.T, db *DB, m *stubMetrics) { require.EqualValues(t, searchCheckpointFrequency+2+2, m.entryCount, "Should have deleted second checkpoint") - requireContains(t, db, 51, 0, createHash(1)) - requireContains(t, db, 51, 1, createHash(1)) - requireFuture(t, db, 52, 0, createHash(1)) - requireFuture(t, db, 52, 1, createHash(2)) + requireContains(t, db, 51, 0, t51, createHash(1)) + requireContains(t, db, 51, 1, t51, createHash(1)) + requireFuture(t, db, 52, 0, t52, createHash(1)) + requireFuture(t, db, 52, 1, t52, createHash(2)) }) }) + // helper function for the below test cases which generate multiple timestamps + tOffset := func(i int) uint64 { + return 
uint64(500 + i) + } + t.Run("BetweenBlockEntries", func(t *testing.T) { runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { // create many blocks, and all the odd blocks get 2 logs for i := uint32(0); i < 30; i++ { bl := eth.BlockID{Hash: createHash(int(i)), Number: uint64(i)} - require.NoError(t, db.SealBlock(createHash(int(i)-1), bl, 500+uint64(i))) + require.NoError(t, db.SealBlock(createHash(int(i)-1), bl, tOffset(int(i)))) if i%2 == 0 { require.NoError(t, db.AddLog(createHash(1), bl, 0, nil)) require.NoError(t, db.AddLog(createHash(2), bl, 1, nil)) @@ -1144,10 +1181,10 @@ func TestRewind(t *testing.T) { require.NoError(t, db.Rewind(createID(15))) }, func(t *testing.T, db *DB, m *stubMetrics) { - requireContains(t, db, 15, 0, createHash(1)) - requireContains(t, db, 15, 1, createHash(2)) - requireFuture(t, db, 16, 0, createHash(1)) - requireFuture(t, db, 16, 1, createHash(2)) + requireContains(t, db, 15, 0, tOffset(15), createHash(1)) + requireContains(t, db, 15, 1, tOffset(15), createHash(2)) + requireFuture(t, db, 16, 0, tOffset(16), createHash(1)) + requireFuture(t, db, 16, 1, tOffset(16), createHash(2)) }) }) @@ -1157,7 +1194,7 @@ func TestRewind(t *testing.T) { // create many blocks, and all the even blocks get 2 logs for i := uint32(0); i <= 30; i++ { bl := eth.BlockID{Hash: createHash(int(i)), Number: uint64(i)} - require.NoError(t, db.SealBlock(createHash(int(i)-1), bl, 500+uint64(i))) + require.NoError(t, db.SealBlock(createHash(int(i)-1), bl, tOffset(int(i)))) if i%2 == 1 { require.NoError(t, db.AddLog(createHash(1), bl, 0, nil)) require.NoError(t, db.AddLog(createHash(2), bl, 1, nil)) @@ -1167,11 +1204,11 @@ func TestRewind(t *testing.T) { require.NoError(t, db.Rewind(createID(30))) }, func(t *testing.T, db *DB, m *stubMetrics) { - requireContains(t, db, 20, 0, createHash(1)) - requireContains(t, db, 20, 1, createHash(2)) + requireContains(t, db, 20, 0, tOffset(20), createHash(1)) + requireContains(t, db, 20, 1, tOffset(20), createHash(2)) // 
built on top of 29, these are in sealed block 30, still around - requireContains(t, db, 30, 0, createHash(1)) - requireContains(t, db, 30, 1, createHash(2)) + requireContains(t, db, 30, 0, tOffset(30), createHash(1)) + requireContains(t, db, 30, 1, tOffset(30), createHash(2)) }) }) @@ -1181,7 +1218,7 @@ func TestRewind(t *testing.T) { // create many blocks, and all the odd blocks get 2 logs for i := uint32(0); i < 30; i++ { bl := eth.BlockID{Hash: createHash(int(i)), Number: uint64(i)} - require.NoError(t, db.SealBlock(createHash(int(i)-1), bl, 500+uint64(i))) + require.NoError(t, db.SealBlock(createHash(int(i)-1), bl, tOffset(int(i)))) if i%2 == 0 { require.NoError(t, db.AddLog(createHash(1), bl, 0, nil)) require.NoError(t, db.AddLog(createHash(2), bl, 1, nil)) @@ -1206,7 +1243,7 @@ func TestRewind(t *testing.T) { err = db.AddLog(createHash(42), bl16, 0, nil) require.NoError(t, err) // not sealed yet - requireFuture(t, db, 17, 0, createHash(42)) + requireFuture(t, db, 17, 0, tOffset(17), createHash(42)) }) }) } diff --git a/op-supervisor/supervisor/backend/db/open.go b/op-supervisor/supervisor/backend/db/open.go index 6a4eee0f047..8e6b44edea0 100644 --- a/op-supervisor/supervisor/backend/db/open.go +++ b/op-supervisor/supervisor/backend/db/open.go @@ -22,8 +22,8 @@ func OpenLogDB(logger log.Logger, chainID eth.ChainID, dataDir string, m logs.Me return logDB, nil } -func OpenLocalDerivedFromDB(logger log.Logger, chainID eth.ChainID, dataDir string, m fromda.ChainMetrics) (*fromda.DB, error) { - path, err := prepLocalDerivedFromDBPath(chainID, dataDir) +func OpenLocalDerivationDB(logger log.Logger, chainID eth.ChainID, dataDir string, m fromda.ChainMetrics) (*fromda.DB, error) { + path, err := prepLocalDerivationDBPath(chainID, dataDir) if err != nil { return nil, fmt.Errorf("failed to prepare datadir for chain %s: %w", chainID, err) } @@ -34,8 +34,8 @@ func OpenLocalDerivedFromDB(logger log.Logger, chainID eth.ChainID, dataDir stri return db, nil } -func 
OpenCrossDerivedFromDB(logger log.Logger, chainID eth.ChainID, dataDir string, m fromda.ChainMetrics) (*fromda.DB, error) { - path, err := prepCrossDerivedFromDBPath(chainID, dataDir) +func OpenCrossDerivationDB(logger log.Logger, chainID eth.ChainID, dataDir string, m fromda.ChainMetrics) (*fromda.DB, error) { + path, err := prepCrossDerivationDBPath(chainID, dataDir) if err != nil { return nil, fmt.Errorf("failed to prepare datadir for chain %s: %w", chainID, err) } diff --git a/op-supervisor/supervisor/backend/db/query.go b/op-supervisor/supervisor/backend/db/query.go index 4c24f2b9c4a..04696be0f57 100644 --- a/op-supervisor/supervisor/backend/db/query.go +++ b/op-supervisor/supervisor/backend/db/query.go @@ -4,8 +4,6 @@ import ( "errors" "fmt" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" @@ -41,7 +39,7 @@ func (db *ChainsDB) LastCommonL1() (types.BlockSeal, error) { if !ok { return types.BlockSeal{}, types.ErrUnknownChain } - last, err := ldb.Latest() + last, err := ldb.Last() if err != nil { return types.BlockSeal{}, fmt.Errorf("failed to determine Last Common L1: %w", err) } @@ -49,8 +47,8 @@ func (db *ChainsDB) LastCommonL1() (types.BlockSeal, error) { // or if the new common block is older than the current common block // set the common block if commonL1 == (types.BlockSeal{}) || - last.DerivedFrom.Number < commonL1.Number { - commonL1 = last.DerivedFrom + last.Source.Number < commonL1.Number { + commonL1 = last.Source } } return commonL1, nil @@ -108,7 +106,7 @@ func (db *ChainsDB) SafeDerivedAt(chainID eth.ChainID, derivedFrom eth.BlockID) if !ok { return types.BlockSeal{}, types.ErrUnknownChain } - derived, err := lDB.LastDerivedAt(derivedFrom) + derived, err := lDB.SourceToLastDerived(derivedFrom) if err != nil { return types.BlockSeal{}, 
fmt.Errorf("failed to find derived block %s: %w", derivedFrom, err) } @@ -149,7 +147,7 @@ func (db *ChainsDB) AcceptedBlock(chainID eth.ChainID, id eth.BlockID) error { if !ok { return types.ErrUnknownChain } - latest, err := localDB.Latest() + latest, err := localDB.Last() if err != nil { // If we have invalidated the latest block, figure out what it is. // Only the tip can be invalidated. So if the block we check is older, it still can be accepted. @@ -163,7 +161,7 @@ func (db *ChainsDB) AcceptedBlock(chainID eth.ChainID, id eth.BlockID) error { types.ErrAwaitReplacementBlock) } // If it's older, we should check if the local-safe DB matches. - return localDB.IsDerived(id) + return localDB.ContainsDerived(id) } else { return fmt.Errorf("failed to read latest local-safe block: %w", err) } @@ -172,7 +170,7 @@ func (db *ChainsDB) AcceptedBlock(chainID eth.ChainID, id eth.BlockID) error { return nil } // If it's older, we should check if the local-safe DB matches. - return localDB.IsDerived(id) + return localDB.ContainsDerived(id) } func (db *ChainsDB) LocalSafe(chainID eth.ChainID) (pair types.DerivedBlockSealPair, err error) { @@ -180,7 +178,7 @@ func (db *ChainsDB) LocalSafe(chainID eth.ChainID) (pair types.DerivedBlockSealP if !ok { return types.DerivedBlockSealPair{}, types.ErrUnknownChain } - return localDB.Latest() + return localDB.Last() } func (db *ChainsDB) CrossSafe(chainID eth.ChainID) (pair types.DerivedBlockSealPair, err error) { @@ -188,7 +186,7 @@ func (db *ChainsDB) CrossSafe(chainID eth.ChainID) (pair types.DerivedBlockSealP if !ok { return types.DerivedBlockSealPair{}, types.ErrUnknownChain } - return crossDB.Latest() + return crossDB.Last() } func (db *ChainsDB) FinalizedL1() eth.BlockRef { @@ -206,51 +204,51 @@ func (db *ChainsDB) Finalized(chainID eth.ChainID) (types.BlockSeal, error) { if !ok { return types.BlockSeal{}, types.ErrUnknownChain } - latest, err := xDB.Latest() + latest, err := xDB.Last() if err != nil { return types.BlockSeal{}, 
fmt.Errorf("could not get the latest derived pair for chain %s: %w", chainID, err) } // if the finalized L1 block is newer than the latest L1 block used to derive L2 blocks, // the finality signal automatically applies to all previous blocks, including the latest derived block - if finalizedL1.Number > latest.DerivedFrom.Number { + if finalizedL1.Number > latest.Source.Number { db.logger.Warn("Finalized L1 block is newer than the latest L1 for this chain. Assuming latest L2 is finalized", "chain", chainID, "finalizedL1", finalizedL1.Number, - "latestDerivedFrom", latest.DerivedFrom.Number, - "latestDerived", latest.DerivedFrom) + "latestSource", latest.Source.Number, + "latestDerived", latest.Source) return latest.Derived, nil } // otherwise, use the finalized L1 block to determine the final L2 block that was derived from it - derived, err := db.LastCrossDerivedFrom(chainID, finalizedL1.ID()) + derived, err := db.CrossSourceToLastDerived(chainID, finalizedL1.ID()) if err != nil { return types.BlockSeal{}, fmt.Errorf("could not find what was last derived in L2 chain %s from the finalized L1 block %s: %w", chainID, finalizedL1, err) } return derived, nil } -func (db *ChainsDB) LastCrossDerivedFrom(chainID eth.ChainID, derivedFrom eth.BlockID) (derived types.BlockSeal, err error) { +func (db *ChainsDB) CrossSourceToLastDerived(chainID eth.ChainID, derivedFrom eth.BlockID) (derived types.BlockSeal, err error) { crossDB, ok := db.crossDBs.Get(chainID) if !ok { return types.BlockSeal{}, types.ErrUnknownChain } - return crossDB.LastDerivedAt(derivedFrom) + return crossDB.SourceToLastDerived(derivedFrom) } -// CrossDerivedFromBlockRef returns the block that the given block was derived from, if it exists in the cross derived-from storage. -// This includes the parent-block lookup. Use CrossDerivedFrom if no parent-block info is needed. 
-func (db *ChainsDB) CrossDerivedFromBlockRef(chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { +// CrossDerivedToSourceRef returns the block that the given block was derived from, if it exists in the cross derived-from storage. +// This call requires the block to have a parent to be turned into a Ref. Use CrossDerivedToSource if the parent is not needed. +func (db *ChainsDB) CrossDerivedToSourceRef(chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { xdb, ok := db.crossDBs.Get(chainID) if !ok { return eth.BlockRef{}, types.ErrUnknownChain } - res, err := xdb.DerivedFrom(derived) + res, err := xdb.DerivedToFirstSource(derived) if err != nil { return eth.BlockRef{}, err } - parent, err := xdb.PreviousDerivedFrom(res.ID()) - // if we are working with the first item in the database, PreviousDerivedFrom will return ErrPreviousToFirst - // in which case we can attach a zero parent to the cross-derived-from block, as the parent block is unknown + parent, err := xdb.PreviousSource(res.ID()) + // if we are working with the first item in the database, PreviousSource will return ErrPreviousToFirst + // in which case we can attach a zero parent to the block, as the parent block is unknown if errors.Is(err, types.ErrPreviousToFirst) { return res.ForceWithParent(eth.BlockID{}), nil } else if err != nil { @@ -259,26 +257,14 @@ func (db *ChainsDB) CrossDerivedFromBlockRef(chainID eth.ChainID, derived eth.Bl return res.MustWithParent(parent.ID()), nil } -// Check calls the underlying logDB to determine if the given log entry exists at the given location. +// Contains calls the underlying logDB to determine if the given log entry exists at the given location. // If the block-seal of the block that includes the log is known, it is returned. It is fully zeroed otherwise, if the block is in-progress. 
-func (db *ChainsDB) Check(chain eth.ChainID, blockNum uint64, timestamp uint64, logIdx uint32, logHash common.Hash) (includedIn types.BlockSeal, err error) { +func (db *ChainsDB) Contains(chain eth.ChainID, q types.ContainsQuery) (includedIn types.BlockSeal, err error) { logDB, ok := db.logDBs.Get(chain) if !ok { return types.BlockSeal{}, fmt.Errorf("%w: %v", types.ErrUnknownChain, chain) } - includedIn, err = logDB.Contains(blockNum, logIdx, logHash) - if err != nil { - return types.BlockSeal{}, err - } - if includedIn.Timestamp != timestamp { - return types.BlockSeal{}, - fmt.Errorf("log exists in block %s, but block timestamp %d does not match %d: %w", - includedIn, - includedIn.Timestamp, - timestamp, - types.ErrConflict) - } - return includedIn, nil + return logDB.Contains(q) } // OpenBlock returns the Executing Messages for the block at the given number on the given chain. @@ -291,24 +277,24 @@ func (db *ChainsDB) OpenBlock(chainID eth.ChainID, blockNum uint64) (seal eth.Bl return logDB.OpenBlock(blockNum) } -// LocalDerivedFrom returns the block that the given block was derived from, if it exists in the local derived-from storage. +// LocalDerivedToSource returns the block that the given block was derived from, if it exists in the local derived-from storage. // it routes the request to the appropriate localDB. -func (db *ChainsDB) LocalDerivedFrom(chain eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { +func (db *ChainsDB) LocalDerivedToSource(chain eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { lDB, ok := db.localDBs.Get(chain) if !ok { return types.BlockSeal{}, types.ErrUnknownChain } - return lDB.DerivedFrom(derived) + return lDB.DerivedToFirstSource(derived) } -// CrossDerivedFrom returns the block that the given block was derived from, if it exists in the cross derived-from storage. 
+// CrossDerivedToSource returns the block that the given block was derived from, if it exists in the cross derived-from storage. // it routes the request to the appropriate crossDB. -func (db *ChainsDB) CrossDerivedFrom(chain eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { +func (db *ChainsDB) CrossDerivedToSource(chain eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { xDB, ok := db.crossDBs.Get(chain) if !ok { return types.BlockSeal{}, types.ErrUnknownChain } - return xDB.DerivedFrom(derived) + return xDB.DerivedToFirstSource(derived) } // CandidateCrossSafe returns the candidate local-safe block that may become cross-safe, @@ -342,7 +328,7 @@ func (db *ChainsDB) CandidateCrossSafe(chain eth.ChainID) (result types.DerivedB // (D, 2) -> 2 is in scope, stay on D, promote candidate to cross-safe // (D, 3) -> look at 3 next, see if we have to bump L1 yet, try with same L1 scope first - crossSafe, err := xDB.Latest() + crossSafe, err := xDB.Last() if err != nil { if errors.Is(err, types.ErrFuture) { // If we do not have any cross-safe block yet, then return the first local-safe block. 
@@ -351,17 +337,17 @@ func (db *ChainsDB) CandidateCrossSafe(chain eth.ChainID) (result types.DerivedB return types.DerivedBlockRefPair{}, fmt.Errorf("failed to find first local-safe block: %w", err) } // the first derivedFrom (L1 block) is unlikely to be the genesis block, - derivedFromRef, err := first.DerivedFrom.WithParent(eth.BlockID{}) + derivedFromRef, err := first.Source.WithParent(eth.BlockID{}) if err != nil { // if the first derivedFrom isn't the genesis block, just warn and continue anyway - db.logger.Warn("First DerivedFrom is not genesis block") - derivedFromRef = first.DerivedFrom.ForceWithParent(eth.BlockID{}) + db.logger.Warn("First Source is not genesis block") + derivedFromRef = first.Source.ForceWithParent(eth.BlockID{}) } // the first derived must be the genesis block, panic otherwise derivedRef := first.Derived.MustWithParent(eth.BlockID{}) return types.DerivedBlockRefPair{ - DerivedFrom: derivedFromRef, - Derived: derivedRef, + Source: derivedFromRef, + Derived: derivedRef, }, nil } return types.DerivedBlockRefPair{}, err @@ -385,42 +371,42 @@ func (db *ChainsDB) CandidateCrossSafe(chain eth.ChainID) (result types.DerivedB candidateRef := candidatePair.Derived.MustWithParent(crossSafe.Derived.ID()) - parentDerivedFrom, err := lDB.PreviousDerivedFrom(candidatePair.DerivedFrom.ID()) - // if we are working with the first item in the database, PreviousDerivedFrom will return ErrPreviousToFirst + parentSource, err := lDB.PreviousSource(candidatePair.Source.ID()) + // if we are working with the first item in the database, PreviousSource will return ErrPreviousToFirst // in which case we can attach a zero parent to the cross-derived-from block, as the parent block is unknown if errors.Is(err, types.ErrPreviousToFirst) { - parentDerivedFrom = types.BlockSeal{} + parentSource = types.BlockSeal{} } else if err != nil { - return types.DerivedBlockRefPair{}, fmt.Errorf("failed to find parent-block of derived-from %s: %w", candidatePair.DerivedFrom, err) 
+ return types.DerivedBlockRefPair{}, fmt.Errorf("failed to find parent-block of derived-from %s: %w", candidatePair.Source, err) } - candidateFromRef := candidatePair.DerivedFrom.MustWithParent(parentDerivedFrom.ID()) + candidateFromRef := candidatePair.Source.MustWithParent(parentSource.ID()) // Allow increment of DA by 1, if we know the floor (due to local safety) is 1 ahead of the current cross-safe L1 scope. - if candidatePair.DerivedFrom.Number > crossSafe.DerivedFrom.Number+1 { + if candidatePair.Source.Number > crossSafe.Source.Number+1 { // If we are not ready to process the candidate block, // then we need to stick to the current scope, so the caller can bump up from there. - var crossDerivedFromRef eth.BlockRef - parent, err := lDB.PreviousDerivedFrom(crossSafe.DerivedFrom.ID()) - // if we are working with the first item in the database, PreviousDerivedFrom will return ErrPreviousToFirst + var crossSourceRef eth.BlockRef + parent, err := lDB.PreviousSource(crossSafe.Source.ID()) + // if we are working with the first item in the database, PreviousSource will return ErrPreviousToFirst // in which case we can attach a zero parent to the cross-derived-from block, as the parent block is unknown if errors.Is(err, types.ErrPreviousToFirst) { - crossDerivedFromRef = crossSafe.DerivedFrom.ForceWithParent(eth.BlockID{}) + crossSourceRef = crossSafe.Source.ForceWithParent(eth.BlockID{}) } else if err != nil { return types.DerivedBlockRefPair{}, - fmt.Errorf("failed to find parent-block of cross-derived-from %s: %w", crossSafe.DerivedFrom, err) + fmt.Errorf("failed to find parent-block of cross-derived-from %s: %w", crossSafe.Source, err) } else { - crossDerivedFromRef = crossSafe.DerivedFrom.MustWithParent(parent.ID()) + crossSourceRef = crossSafe.Source.MustWithParent(parent.ID()) } return types.DerivedBlockRefPair{ - DerivedFrom: crossDerivedFromRef, - Derived: eth.BlockRef{}, + Source: crossSourceRef, + Derived: eth.BlockRef{}, }, fmt.Errorf("candidate is from 
%s, while current scope is %s: %w", - candidateFromRef, crossSafe.DerivedFrom, types.ErrOutOfScope) + candidateFromRef, crossSafe.Source, types.ErrOutOfScope) } return types.DerivedBlockRefPair{ - DerivedFrom: candidateFromRef, - Derived: candidateRef, + Source: candidateFromRef, + Derived: candidateRef, }, nil } @@ -432,20 +418,20 @@ func (db *ChainsDB) PreviousDerived(chain eth.ChainID, derived eth.BlockID) (pre return lDB.PreviousDerived(derived) } -func (db *ChainsDB) PreviousDerivedFrom(chain eth.ChainID, derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) { +func (db *ChainsDB) PreviousSource(chain eth.ChainID, derivedFrom eth.BlockID) (prevSource types.BlockSeal, err error) { lDB, ok := db.localDBs.Get(chain) if !ok { return types.BlockSeal{}, types.ErrUnknownChain } - return lDB.PreviousDerivedFrom(derivedFrom) + return lDB.PreviousSource(derivedFrom) } -func (db *ChainsDB) NextDerivedFrom(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) { +func (db *ChainsDB) NextSource(chain eth.ChainID, derivedFrom eth.BlockID) (after eth.BlockRef, err error) { lDB, ok := db.localDBs.Get(chain) if !ok { return eth.BlockRef{}, types.ErrUnknownChain } - v, err := lDB.NextDerivedFrom(derivedFrom) + v, err := lDB.NextSource(derivedFrom) if err != nil { return eth.BlockRef{}, err } diff --git a/op-supervisor/supervisor/backend/db/query_test.go b/op-supervisor/supervisor/backend/db/query_test.go index d734a2f8434..fbfb37697f3 100644 --- a/op-supervisor/supervisor/backend/db/query_test.go +++ b/op-supervisor/supervisor/backend/db/query_test.go @@ -13,59 +13,59 @@ import ( "github.com/stretchr/testify/require" ) -type mockDerivedFromStorage struct { - latestFn func() (pair types.DerivedBlockSealPair, err error) +type mockDerivationStorage struct { + lastFn func() (pair types.DerivedBlockSealPair, err error) } -func (m *mockDerivedFromStorage) First() (pair types.DerivedBlockSealPair, err error) { +func (m *mockDerivationStorage) 
First() (pair types.DerivedBlockSealPair, err error) { return types.DerivedBlockSealPair{}, nil } -func (m *mockDerivedFromStorage) Latest() (pair types.DerivedBlockSealPair, err error) { - if m.latestFn != nil { - return m.latestFn() +func (m *mockDerivationStorage) Last() (pair types.DerivedBlockSealPair, err error) { + if m.lastFn != nil { + return m.lastFn() } return types.DerivedBlockSealPair{}, nil } -func (m *mockDerivedFromStorage) Invalidated() (pair types.DerivedBlockSealPair, err error) { +func (m *mockDerivationStorage) Invalidated() (pair types.DerivedBlockSealPair, err error) { return types.DerivedBlockSealPair{}, nil } -func (m *mockDerivedFromStorage) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error { +func (m *mockDerivationStorage) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error { return nil } -func (m *mockDerivedFromStorage) ReplaceInvalidatedBlock(replacementDerived eth.BlockRef, invalidated common.Hash) (types.DerivedBlockSealPair, error) { +func (m *mockDerivationStorage) ReplaceInvalidatedBlock(replacementDerived eth.BlockRef, invalidated common.Hash) (types.DerivedBlockSealPair, error) { return types.DerivedBlockSealPair{}, nil } -func (m *mockDerivedFromStorage) RewindAndInvalidate(invalidated types.DerivedBlockRefPair) error { +func (m *mockDerivationStorage) RewindAndInvalidate(invalidated types.DerivedBlockRefPair) error { return nil } -func (m *mockDerivedFromStorage) LastDerivedAt(derivedFrom eth.BlockID) (derived types.BlockSeal, err error) { +func (m *mockDerivationStorage) SourceToLastDerived(source eth.BlockID) (derived types.BlockSeal, err error) { return types.BlockSeal{}, nil } -func (m *mockDerivedFromStorage) IsDerived(derived eth.BlockID) error { +func (m *mockDerivationStorage) ContainsDerived(derived eth.BlockID) error { return nil } -func (m *mockDerivedFromStorage) DerivedFrom(derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { +func (m *mockDerivationStorage) 
DerivedToFirstSource(derived eth.BlockID) (source types.BlockSeal, err error) { return types.BlockSeal{}, nil } -func (m *mockDerivedFromStorage) FirstAfter(derivedFrom, derived eth.BlockID) (next types.DerivedBlockSealPair, err error) { +func (m *mockDerivationStorage) Next(pair types.DerivedIDPair) (next types.DerivedBlockSealPair, err error) { return types.DerivedBlockSealPair{}, nil } -func (m *mockDerivedFromStorage) NextDerivedFrom(derivedFrom eth.BlockID) (nextDerivedFrom types.BlockSeal, err error) { +func (m *mockDerivationStorage) NextSource(source eth.BlockID) (nextSource types.BlockSeal, err error) { return types.BlockSeal{}, nil } -func (m *mockDerivedFromStorage) NextDerived(derived eth.BlockID) (next types.DerivedBlockSealPair, err error) { +func (m *mockDerivationStorage) NextDerived(derived eth.BlockID) (next types.DerivedBlockSealPair, err error) { return types.DerivedBlockSealPair{}, nil } -func (m *mockDerivedFromStorage) PreviousDerivedFrom(derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) { +func (m *mockDerivationStorage) PreviousSource(source eth.BlockID) (prevSource types.BlockSeal, err error) { return types.BlockSeal{}, nil } -func (m *mockDerivedFromStorage) PreviousDerived(derived eth.BlockID) (prevDerived types.BlockSeal, err error) { +func (m *mockDerivationStorage) PreviousDerived(derived eth.BlockID) (prevDerived types.BlockSeal, err error) { return types.BlockSeal{}, nil } -func (m *mockDerivedFromStorage) RewindToScope(scope eth.BlockID) error { +func (m *mockDerivationStorage) RewindToScope(scope eth.BlockID) error { return nil } -func (m *mockDerivedFromStorage) RewindToFirstDerived(derived eth.BlockID) error { +func (m *mockDerivationStorage) RewindToFirstDerived(derived eth.BlockID) error { return nil } @@ -93,14 +93,14 @@ func sampleDepSet(t *testing.T) depset.DependencySet { } func TestCommonL1UnknownChain(t *testing.T) { - m1 := &mockDerivedFromStorage{} - m2 := &mockDerivedFromStorage{} + m1 := 
&mockDerivationStorage{} + m2 := &mockDerivationStorage{} logger := testlog.Logger(t, log.LevelDebug) chainDB := NewChainsDB(logger, sampleDepSet(t)) // add a mock local derived-from storage to drive the test - chainDB.AddLocalDerivedFromDB(eth.ChainIDFromUInt64(900), m1) - chainDB.AddLocalDerivedFromDB(eth.ChainIDFromUInt64(901), m2) + chainDB.AddLocalDerivationDB(eth.ChainIDFromUInt64(900), m1) + chainDB.AddLocalDerivationDB(eth.ChainIDFromUInt64(901), m2) // don't attach a mock for chain 902 _, err := chainDB.LastCommonL1() @@ -108,58 +108,58 @@ func TestCommonL1UnknownChain(t *testing.T) { } func TestCommonL1(t *testing.T) { - m1 := &mockDerivedFromStorage{} - m2 := &mockDerivedFromStorage{} - m3 := &mockDerivedFromStorage{} + m1 := &mockDerivationStorage{} + m2 := &mockDerivationStorage{} + m3 := &mockDerivationStorage{} logger := testlog.Logger(t, log.LevelDebug) chainDB := NewChainsDB(logger, sampleDepSet(t)) // add a mock local derived-from storage to drive the test - chainDB.AddLocalDerivedFromDB(eth.ChainIDFromUInt64(900), m1) - chainDB.AddLocalDerivedFromDB(eth.ChainIDFromUInt64(901), m2) - chainDB.AddLocalDerivedFromDB(eth.ChainIDFromUInt64(902), m3) + chainDB.AddLocalDerivationDB(eth.ChainIDFromUInt64(900), m1) + chainDB.AddLocalDerivationDB(eth.ChainIDFromUInt64(901), m2) + chainDB.AddLocalDerivationDB(eth.ChainIDFromUInt64(902), m3) // returnN is a helper function which creates a Latest Function for the test returnN := func(n uint64) func() (pair types.DerivedBlockSealPair, err error) { return func() (pair types.DerivedBlockSealPair, err error) { return types.DerivedBlockSealPair{ - DerivedFrom: types.BlockSeal{ + Source: types.BlockSeal{ Number: n, }, }, nil } } t.Run("pattern 1", func(t *testing.T) { - m1.latestFn = returnN(1) - m2.latestFn = returnN(2) - m3.latestFn = returnN(3) + m1.lastFn = returnN(1) + m2.lastFn = returnN(2) + m3.lastFn = returnN(3) latest, err := chainDB.LastCommonL1() require.NoError(t, err) require.Equal(t, uint64(1), 
latest.Number) }) t.Run("pattern 2", func(t *testing.T) { - m1.latestFn = returnN(3) - m2.latestFn = returnN(2) - m3.latestFn = returnN(1) + m1.lastFn = returnN(3) + m2.lastFn = returnN(2) + m3.lastFn = returnN(1) latest, err := chainDB.LastCommonL1() require.NoError(t, err) require.Equal(t, uint64(1), latest.Number) }) t.Run("pattern 3", func(t *testing.T) { - m1.latestFn = returnN(99) - m2.latestFn = returnN(1) - m3.latestFn = returnN(98) + m1.lastFn = returnN(99) + m2.lastFn = returnN(1) + m3.lastFn = returnN(98) latest, err := chainDB.LastCommonL1() require.NoError(t, err) require.Equal(t, uint64(1), latest.Number) }) t.Run("error", func(t *testing.T) { - m1.latestFn = returnN(99) - m2.latestFn = returnN(1) - m3.latestFn = func() (pair types.DerivedBlockSealPair, err error) { + m1.lastFn = returnN(99) + m2.lastFn = returnN(1) + m3.lastFn = func() (pair types.DerivedBlockSealPair, err error) { return types.DerivedBlockSealPair{}, fmt.Errorf("error") } latest, err := chainDB.LastCommonL1() diff --git a/op-supervisor/supervisor/backend/db/update.go b/op-supervisor/supervisor/backend/db/update.go index 5e3df48a4c3..72d4aad160c 100644 --- a/op-supervisor/supervisor/backend/db/update.go +++ b/op-supervisor/supervisor/backend/db/update.go @@ -91,8 +91,8 @@ func (db *ChainsDB) UpdateLocalSafe(chain eth.ChainID, derivedFrom eth.BlockRef, db.emitter.Emit(superevents.LocalSafeUpdateEvent{ ChainID: chain, NewLocalSafe: types.DerivedBlockSealPair{ - DerivedFrom: types.BlockSealFromRef(derivedFrom), - Derived: types.BlockSealFromRef(lastDerived), + Source: types.BlockSealFromRef(derivedFrom), + Derived: types.BlockSealFromRef(lastDerived), }, }) } @@ -123,8 +123,8 @@ func (db *ChainsDB) UpdateCrossSafe(chain eth.ChainID, l1View eth.BlockRef, last db.emitter.Emit(superevents.CrossSafeUpdateEvent{ ChainID: chain, NewCrossSafe: types.DerivedBlockSealPair{ - DerivedFrom: types.BlockSealFromRef(l1View), - Derived: types.BlockSealFromRef(lastCrossDerived), + Source: 
types.BlockSealFromRef(l1View), + Derived: types.BlockSealFromRef(lastCrossDerived), }, }) return nil @@ -246,7 +246,7 @@ func (db *ChainsDB) ResetCrossUnsafeIfNewerThan(chainID eth.ChainID, number uint if !ok { return fmt.Errorf("cannot find cross-safe DB of chain %s for invalidation: %w", chainID, types.ErrUnknownChain) } - crossSafe, err := crossSafeDB.Latest() + crossSafe, err := crossSafeDB.Last() if err != nil { return fmt.Errorf("cannot get cross-safe of chain %s: %w", chainID, err) } diff --git a/op-supervisor/supervisor/backend/mock.go b/op-supervisor/supervisor/backend/mock.go index 23148f93207..fdce4185c28 100644 --- a/op-supervisor/supervisor/backend/mock.go +++ b/op-supervisor/supervisor/backend/mock.go @@ -71,7 +71,7 @@ func (m *MockBackend) FinalizedL1() eth.BlockRef { return eth.BlockRef{} } -func (m *MockBackend) CrossDerivedFrom(ctx context.Context, chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { +func (m *MockBackend) CrossDerivedToSource(ctx context.Context, chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { return eth.BlockRef{}, nil } @@ -79,6 +79,10 @@ func (m *MockBackend) SuperRootAtTimestamp(ctx context.Context, timestamp hexuti return eth.SuperRootResponse{}, nil } +func (m *MockBackend) SyncStatus() (eth.SupervisorSyncStatus, error) { + return eth.SupervisorSyncStatus{}, nil +} + func (m *MockBackend) Close() error { return nil } diff --git a/op-supervisor/supervisor/backend/rewinder/rewinder.go b/op-supervisor/supervisor/backend/rewinder/rewinder.go index e1ff2849c0b..9fd1e7d661f 100644 --- a/op-supervisor/supervisor/backend/rewinder/rewinder.go +++ b/op-supervisor/supervisor/backend/rewinder/rewinder.go @@ -21,9 +21,9 @@ type l1Node interface { type rewinderDB interface { DependencySet() depset.DependencySet - LastCrossDerivedFrom(chainID eth.ChainID, derivedFrom eth.BlockID) (derived types.BlockSeal, err error) - PreviousDerivedFrom(chain eth.ChainID, derivedFrom 
eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) - CrossDerivedFromBlockRef(chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) + CrossSourceToLastDerived(chainID eth.ChainID, derivedFrom eth.BlockID) (derived types.BlockSeal, err error) + PreviousSource(chain eth.ChainID, source eth.BlockID) (prevSource types.BlockSeal, err error) + CrossDerivedToSourceRef(chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) LocalSafe(eth.ChainID) (types.DerivedBlockSealPair, error) CrossSafe(eth.ChainID) (types.DerivedBlockSealPair, error) @@ -35,7 +35,7 @@ type rewinderDB interface { FindSealedBlock(eth.ChainID, uint64) (types.BlockSeal, error) Finalized(eth.ChainID) (types.BlockSeal, error) - LocalDerivedFrom(chain eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) + LocalDerivedToSource(chain eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) } // Rewinder is responsible for handling the rewinding of databases to the latest common ancestor between @@ -121,7 +121,7 @@ func (r *Rewinder) handleLocalDerivedEvent(ev superevents.LocalSafeUpdateEvent) return } - _, err := r.db.LocalDerivedFrom(ev.ChainID, target.ID()) + _, err := r.db.LocalDerivedToSource(ev.ChainID, target.ID()) if err != nil { if errors.Is(err, types.ErrConflict) || errors.Is(err, types.ErrFuture) { continue @@ -153,7 +153,7 @@ func (r *Rewinder) rewindL1ChainIfReorged(chainID eth.ChainID, newTip eth.BlockI if err != nil { return fmt.Errorf("failed to get local safe for chain %s: %w", chainID, err) } - localSafeL1 := localSafe.DerivedFrom + localSafeL1 := localSafe.Source // Get the canonical L1 block at our local head's height canonicalL1, err := r.l1Node.L1BlockRefByNumber(context.Background(), localSafeL1.Number) @@ -179,7 +179,7 @@ func (r *Rewinder) rewindL1ChainIfReorged(chainID eth.ChainID, newTip eth.BlockI return fmt.Errorf("failed to get finalized block for chain %s: %w", chainID, err) } } - 
finalizedL1, err := r.db.CrossDerivedFromBlockRef(chainID, finalized.ID()) + finalizedL1, err := r.db.CrossDerivedToSourceRef(chainID, finalized.ID()) if err != nil { return fmt.Errorf("failed to get finalized L1 block for chain %s: %w", chainID, err) } @@ -201,7 +201,7 @@ func (r *Rewinder) rewindL1ChainIfReorged(chainID eth.ChainID, newTip eth.BlockI } // Get the previous L1 block from our DB - prevDerivedFrom, err := r.db.PreviousDerivedFrom(chainID, currentL1) + prevDerivedFrom, err := r.db.PreviousSource(chainID, currentL1) if err != nil { // If we hit the first block, use it as common ancestor if errors.Is(err, types.ErrPreviousToFirst) { diff --git a/op-supervisor/supervisor/backend/rewinder/rewinder_test.go b/op-supervisor/supervisor/backend/rewinder/rewinder_test.go index cb59b2e2fa8..2b09c50c48e 100644 --- a/op-supervisor/supervisor/backend/rewinder/rewinder_test.go +++ b/op-supervisor/supervisor/backend/rewinder/rewinder_test.go @@ -163,7 +163,7 @@ func TestRewindL2(t *testing.T) { i.OnEvent(superevents.LocalSafeUpdateEvent{ ChainID: chainID, NewLocalSafe: types.DerivedBlockSealPair{ - DerivedFrom: types.BlockSeal{ + Source: types.BlockSeal{ Hash: l1Block1.Hash, Number: l1Block1.Number, }, @@ -260,7 +260,7 @@ func TestNoRewindNeeded(t *testing.T) { i.OnEvent(superevents.LocalSafeUpdateEvent{ ChainID: chainID, NewLocalSafe: types.DerivedBlockSealPair{ - DerivedFrom: types.BlockSeal{ + Source: types.BlockSeal{ Hash: l1Block2.Hash, Number: l1Block2.Number, }, @@ -358,7 +358,7 @@ func TestRewindLongChain(t *testing.T) { i.OnEvent(superevents.LocalSafeUpdateEvent{ ChainID: chainID, NewLocalSafe: types.DerivedBlockSealPair{ - DerivedFrom: types.BlockSeal{ + Source: types.BlockSeal{ Hash: l1Blocks[96/10].Hash, Number: l1Blocks[96/10].Number, }, @@ -427,7 +427,7 @@ func TestRewindMultiChain(t *testing.T) { i.OnEvent(superevents.LocalSafeUpdateEvent{ ChainID: chainID, NewLocalSafe: types.DerivedBlockSealPair{ - DerivedFrom: types.BlockSeal{ + Source: 
types.BlockSeal{ Hash: l1Block1.Hash, Number: l1Block1.Number, }, @@ -565,7 +565,7 @@ func TestRewindL2WalkBack(t *testing.T) { i.OnEvent(superevents.LocalSafeUpdateEvent{ ChainID: chainID, NewLocalSafe: types.DerivedBlockSealPair{ - DerivedFrom: types.BlockSeal{ + Source: types.BlockSeal{ Hash: block4B.L1Origin.Hash, Number: block4B.L1Origin.Number, }, @@ -771,12 +771,12 @@ func setupTestChains(t *testing.T, chainIDs ...eth.ChainID) *testSetup { // Create and open the local derived-from DB localDB, err := fromda.NewFromFile(logger, &stubMetrics{}, filepath.Join(chainDir, "local_safe.db")) require.NoError(t, err) - chainsDB.AddLocalDerivedFromDB(chainID, localDB) + chainsDB.AddLocalDerivationDB(chainID, localDB) // Create and open the cross derived-from DB crossDB, err := fromda.NewFromFile(logger, &stubMetrics{}, filepath.Join(chainDir, "cross_safe.db")) require.NoError(t, err) - chainsDB.AddCrossDerivedFromDB(chainID, crossDB) + chainsDB.AddCrossDerivationDB(chainID, crossDB) // Add cross-unsafe tracker chainsDB.AddCrossUnsafeTracker(chainID) diff --git a/op-supervisor/supervisor/backend/status/status.go b/op-supervisor/supervisor/backend/status/status.go new file mode 100644 index 00000000000..2e7bfbaeb0a --- /dev/null +++ b/op-supervisor/supervisor/backend/status/status.go @@ -0,0 +1,69 @@ +package status + +import ( + "sync" + + "github.com/ethereum-optimism/optimism/op-node/rollup/event" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/superevents" +) + +type StatusTracker struct { + statuses map[eth.ChainID]*NodeSyncStatus + mu sync.RWMutex +} + +type NodeSyncStatus struct { + CurrentL1 eth.L1BlockRef + LocalUnsafe eth.BlockRef +} + +func NewStatusTracker() *StatusTracker { + return &StatusTracker{ + statuses: make(map[eth.ChainID]*NodeSyncStatus), + } +} + +func (su *StatusTracker) OnEvent(ev event.Event) bool { + su.mu.Lock() + defer su.mu.Unlock() + + loadStatusRef := 
func(chainID eth.ChainID) *NodeSyncStatus { + v := su.statuses[chainID] + if v == nil { + v = &NodeSyncStatus{} + su.statuses[chainID] = v + } + return v + } + switch x := ev.(type) { + case superevents.LocalDerivedOriginUpdateEvent: + status := loadStatusRef(x.ChainID) + status.CurrentL1 = x.Origin + case superevents.LocalUnsafeUpdateEvent: + status := loadStatusRef(x.ChainID) + status.LocalUnsafe = x.NewLocalUnsafe + default: + return false + } + return true +} + +func (su *StatusTracker) SyncStatus() eth.SupervisorSyncStatus { + su.mu.RLock() + defer su.mu.RUnlock() + + var supervisorStatus eth.SupervisorSyncStatus + for _, nodeStatus := range su.statuses { + if supervisorStatus.MinSyncedL1 == (eth.L1BlockRef{}) || supervisorStatus.MinSyncedL1.Number < nodeStatus.CurrentL1.Number { + supervisorStatus.MinSyncedL1 = nodeStatus.CurrentL1 + } + } + supervisorStatus.Chains = make(map[eth.ChainID]*eth.SupervisorChainSyncStatus) + for chainID, nodeStatus := range su.statuses { + supervisorStatus.Chains[chainID] = ð.SupervisorChainSyncStatus{ + LocalUnsafe: nodeStatus.LocalUnsafe, + } + } + return supervisorStatus +} diff --git a/op-supervisor/supervisor/backend/superevents/events.go b/op-supervisor/supervisor/backend/superevents/events.go index 95a57405212..7b8b7a41d37 100644 --- a/op-supervisor/supervisor/backend/superevents/events.go +++ b/op-supervisor/supervisor/backend/superevents/events.go @@ -119,6 +119,15 @@ func (ev LocalDerivedEvent) String() string { return "local-derived" } +type LocalDerivedOriginUpdateEvent struct { + ChainID eth.ChainID + Origin eth.BlockRef +} + +func (ev LocalDerivedOriginUpdateEvent) String() string { + return "local-derived-origin-update" +} + type AnchorEvent struct { ChainID eth.ChainID Anchor types.DerivedBlockRefPair diff --git a/op-supervisor/supervisor/backend/syncnode/controller_test.go b/op-supervisor/supervisor/backend/syncnode/controller_test.go index fafa7f2340d..337df7c444d 100644 --- 
a/op-supervisor/supervisor/backend/syncnode/controller_test.go +++ b/op-supervisor/supervisor/backend/syncnode/controller_test.go @@ -141,9 +141,10 @@ func sampleDepSet(t *testing.T) depset.DependencySet { } type eventMonitor struct { - anchorCalled int - localDerived int - receivedLocalUnsafe int + anchorCalled int + localDerived int + receivedLocalUnsafe int + localDerivedOriginUpdate int } func (m *eventMonitor) OnEvent(ev event.Event) bool { @@ -154,6 +155,8 @@ func (m *eventMonitor) OnEvent(ev event.Event) bool { m.localDerived += 1 case superevents.LocalUnsafeReceivedEvent: m.receivedLocalUnsafe += 1 + case superevents.LocalDerivedOriginUpdateEvent: + m.localDerivedOriginUpdate += 1 default: return false } @@ -180,8 +183,8 @@ func TestInitFromAnchorPoint(t *testing.T) { ctrl := mockSyncControl{} ctrl.anchorPointFn = func(ctx context.Context) (types.DerivedBlockRefPair, error) { return types.DerivedBlockRefPair{ - Derived: eth.BlockRef{Number: 1}, - DerivedFrom: eth.BlockRef{Number: 0}, + Derived: eth.BlockRef{Number: 1}, + Source: eth.BlockRef{Number: 0}, }, nil } diff --git a/op-supervisor/supervisor/backend/syncnode/node.go b/op-supervisor/supervisor/backend/syncnode/node.go index 32bd79b731f..00944cada15 100644 --- a/op-supervisor/supervisor/backend/syncnode/node.go +++ b/op-supervisor/supervisor/backend/syncnode/node.go @@ -221,6 +221,9 @@ func (m *ManagedNode) onNodeEvent(ev *types.ManagedEvent) { if ev.ReplaceBlock != nil { m.onReplaceBlock(*ev.ReplaceBlock) } + if ev.DerivationOriginUpdate != nil { + m.onDerivationOriginUpdate(*ev.DerivationOriginUpdate) + } } func (m *ManagedNode) onResetEvent(errStr string) { @@ -247,11 +250,11 @@ func (m *ManagedNode) onCrossUnsafeUpdate(seal types.BlockSeal) { } func (m *ManagedNode) onCrossSafeUpdate(pair types.DerivedBlockSealPair) { - m.log.Debug("updating cross safe", "derived", pair.Derived, "derivedFrom", pair.DerivedFrom) + m.log.Debug("updating cross safe", "derived", pair.Derived, "derivedFrom", 
pair.Source) ctx, cancel := context.WithTimeout(m.ctx, nodeTimeout) defer cancel() pairIDs := pair.IDs() - err := m.Node.UpdateCrossSafe(ctx, pairIDs.Derived, pairIDs.DerivedFrom) + err := m.Node.UpdateCrossSafe(ctx, pairIDs.Derived, pairIDs.Source) if err != nil { m.log.Warn("Node failed cross-safe updating", "err", err) return @@ -280,21 +283,19 @@ func (m *ManagedNode) onUnsafeBlock(unsafeRef eth.BlockRef) { func (m *ManagedNode) onDerivationUpdate(pair types.DerivedBlockRefPair) { m.log.Info("Node derived new block", "derived", pair.Derived, - "derivedParent", pair.Derived.ParentID(), "derivedFrom", pair.DerivedFrom) + "derivedParent", pair.Derived.ParentID(), "derivedFrom", pair.Source) m.emitter.Emit(superevents.LocalDerivedEvent{ ChainID: m.chainID, Derived: pair, }) - // TODO: keep synchronous local-safe DB update feedback? - // We'll still need more async ways of doing this for reorg handling. - - // ctx, cancel := context.WithTimeout(m.ctx, internalTimeout) - // defer cancel() - // if err := m.backend.UpdateLocalSafe(ctx, m.chainID, pair.DerivedFrom, pair.Derived); err != nil { - // m.log.Warn("Backend failed to process local-safe update", - // "derived", pair.Derived, "derivedFrom", pair.DerivedFrom, "err", err) - // m.resetSignal(err, pair.DerivedFrom) - // } +} + +func (m *ManagedNode) onDerivationOriginUpdate(origin eth.BlockRef) { + m.log.Info("Node derived new origin", "origin", origin) + m.emitter.Emit(superevents.LocalDerivedOriginUpdateEvent{ + ChainID: m.chainID, + Origin: origin, + }) } func (m *ManagedNode) resetSignal(errSignal error, l1Ref eth.BlockRef) { @@ -437,17 +438,17 @@ func (m *ManagedNode) resolveConflict(ctx context.Context, l1Ref eth.BlockRef, u } func (m *ManagedNode) onExhaustL1Event(completed types.DerivedBlockRefPair) { - m.log.Info("Node completed syncing", "l2", completed.Derived, "l1", completed.DerivedFrom) + m.log.Info("Node completed syncing", "l2", completed.Derived, "l1", completed.Source) internalCtx, cancel := 
context.WithTimeout(m.ctx, internalTimeout) defer cancel() - nextL1, err := m.backend.L1BlockRefByNumber(internalCtx, completed.DerivedFrom.Number+1) + nextL1, err := m.backend.L1BlockRefByNumber(internalCtx, completed.Source.Number+1) if err != nil { if errors.Is(err, ethereum.NotFound) { - m.log.Debug("Next L1 block is not yet available", "l1Block", completed.DerivedFrom, "err", err) + m.log.Debug("Next L1 block is not yet available", "l1Block", completed.Source, "err", err) return } - m.log.Error("Failed to retrieve next L1 block for node", "l1Block", completed.DerivedFrom, "err", err) + m.log.Error("Failed to retrieve next L1 block for node", "l1Block", completed.Source, "err", err) return } @@ -466,14 +467,14 @@ func (m *ManagedNode) onExhaustL1Event(completed types.DerivedBlockRefPair) { // and needs to be replaced with a deposit only block. func (m *ManagedNode) onInvalidateLocalSafe(invalidated types.DerivedBlockRefPair) { m.log.Warn("Instructing node to replace invalidated local-safe block", - "invalidated", invalidated.Derived, "scope", invalidated.DerivedFrom) + "invalidated", invalidated.Derived, "scope", invalidated.Source) ctx, cancel := context.WithTimeout(m.ctx, nodeTimeout) defer cancel() // Send instruction to the node to invalidate the block, and build a replacement block. 
if err := m.Node.InvalidateBlock(ctx, types.BlockSealFromRef(invalidated.Derived)); err != nil { m.log.Warn("Node is unable to invalidate block", - "invalidated", invalidated.Derived, "scope", invalidated.DerivedFrom, "err", err) + "invalidated", invalidated.Derived, "scope", invalidated.Source, "err", err) } } diff --git a/op-supervisor/supervisor/backend/syncnode/node_test.go b/op-supervisor/supervisor/backend/syncnode/node_test.go index 0cdf3ca8301..ff6f0e161d7 100644 --- a/op-supervisor/supervisor/backend/syncnode/node_test.go +++ b/op-supervisor/supervisor/backend/syncnode/node_test.go @@ -75,9 +75,11 @@ func TestEventResponse(t *testing.T) { syncCtrl.subscribeEvents.Send(&types.ManagedEvent{ UnsafeBlock: ð.BlockRef{Number: 1}}) syncCtrl.subscribeEvents.Send(&types.ManagedEvent{ - DerivationUpdate: &types.DerivedBlockRefPair{DerivedFrom: eth.BlockRef{Number: 1}, Derived: eth.BlockRef{Number: 2}}}) + DerivationUpdate: &types.DerivedBlockRefPair{Source: eth.BlockRef{Number: 1}, Derived: eth.BlockRef{Number: 2}}}) syncCtrl.subscribeEvents.Send(&types.ManagedEvent{ - ExhaustL1: &types.DerivedBlockRefPair{DerivedFrom: eth.BlockRef{Number: 1}, Derived: eth.BlockRef{Number: 2}}}) + ExhaustL1: &types.DerivedBlockRefPair{Source: eth.BlockRef{Number: 1}, Derived: eth.BlockRef{Number: 2}}}) + syncCtrl.subscribeEvents.Send(&types.ManagedEvent{ + DerivationOriginUpdate: ð.BlockRef{Number: 1}}) require.NoError(t, ex.Drain()) @@ -86,7 +88,8 @@ func TestEventResponse(t *testing.T) { finalized >= 1 && mon.receivedLocalUnsafe >= 1 && mon.localDerived >= 1 && - nodeExhausted >= 1 + nodeExhausted >= 1 && + mon.localDerivedOriginUpdate >= 1 }, 4*time.Second, 250*time.Millisecond) } diff --git a/op-supervisor/supervisor/frontend/frontend.go b/op-supervisor/supervisor/frontend/frontend.go index 50d10a094c6..1901b8f5a4b 100644 --- a/op-supervisor/supervisor/frontend/frontend.go +++ b/op-supervisor/supervisor/frontend/frontend.go @@ -18,12 +18,13 @@ type AdminBackend interface { type 
QueryBackend interface { CheckMessage(identifier types.Identifier, payloadHash common.Hash) (types.SafetyLevel, error) CheckMessages(messages []types.Message, minSafety types.SafetyLevel) error - CrossDerivedFrom(ctx context.Context, chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) + CrossDerivedToSource(ctx context.Context, chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) LocalUnsafe(ctx context.Context, chainID eth.ChainID) (eth.BlockID, error) CrossSafe(ctx context.Context, chainID eth.ChainID) (types.DerivedIDPair, error) Finalized(ctx context.Context, chainID eth.ChainID) (eth.BlockID, error) FinalizedL1() eth.BlockRef SuperRootAtTimestamp(ctx context.Context, timestamp hexutil.Uint64) (eth.SuperRootResponse, error) + SyncStatus() (eth.SupervisorSyncStatus, error) AllSafeDerivedAt(ctx context.Context, derivedFrom eth.BlockID) (derived map[eth.ChainID]eth.BlockID, err error) } @@ -68,8 +69,14 @@ func (q *QueryFrontend) FinalizedL1() eth.BlockRef { return q.Supervisor.FinalizedL1() } +// CrossDerivedFrom is deprecated, but remains for backwards compatibility to callers +// it is equivalent to CrossDerivedToSource func (q *QueryFrontend) CrossDerivedFrom(ctx context.Context, chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { - return q.Supervisor.CrossDerivedFrom(ctx, chainID, derived) + return q.Supervisor.CrossDerivedToSource(ctx, chainID, derived) +} + +func (q *QueryFrontend) CrossDerivedToSource(ctx context.Context, chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) { + return q.Supervisor.CrossDerivedToSource(ctx, chainID, derived) } func (q *QueryFrontend) SuperRootAtTimestamp(ctx context.Context, timestamp hexutil.Uint64) (eth.SuperRootResponse, error) { @@ -80,6 +87,10 @@ func (q *QueryFrontend) AllSafeDerivedAt(ctx context.Context, derivedFrom eth.Bl return q.Supervisor.AllSafeDerivedAt(ctx, derivedFrom) } +func (q *QueryFrontend) 
SyncStatus() (eth.SupervisorSyncStatus, error) { + return q.Supervisor.SyncStatus() +} + type AdminFrontend struct { Supervisor Backend } diff --git a/op-supervisor/supervisor/types/types.go b/op-supervisor/supervisor/types/types.go index 53385673574..6a2a9e1c148 100644 --- a/op-supervisor/supervisor/types/types.go +++ b/op-supervisor/supervisor/types/types.go @@ -36,6 +36,15 @@ func (ci *ChainIndex) UnmarshalText(data []byte) error { return nil } +// ContainsQuery contains all the information needed to check a message +// against a chain's database, to determine if it is valid (ie all invariants hold). +type ContainsQuery struct { + Timestamp uint64 + BlockNum uint64 + LogIdx uint32 + LogHash common.Hash // LogHash commits to the origin-address and the message payload-hash +} + type ExecutingMessage struct { Chain ChainIndex // same as ChainID for now, but will be indirect, i.e. translated to full ID, later BlockNum uint64 @@ -261,43 +270,43 @@ func LogToMessagePayload(l *ethTypes.Log) []byte { return msg } -// DerivedBlockRefPair is a pair of block refs, where Derived (L2) is derived from DerivedFrom (L1). +// DerivedBlockRefPair is a pair of block refs, where Derived (L2) is derived from Source (L1). 
type DerivedBlockRefPair struct { - DerivedFrom eth.BlockRef `json:"derivedFrom"` - Derived eth.BlockRef `json:"derived"` + Source eth.BlockRef `json:"source"` + Derived eth.BlockRef `json:"derived"` } func (refs *DerivedBlockRefPair) IDs() DerivedIDPair { return DerivedIDPair{ - DerivedFrom: refs.DerivedFrom.ID(), - Derived: refs.Derived.ID(), + Source: refs.Source.ID(), + Derived: refs.Derived.ID(), } } func (refs *DerivedBlockRefPair) Seals() DerivedBlockSealPair { return DerivedBlockSealPair{ - DerivedFrom: BlockSealFromRef(refs.DerivedFrom), - Derived: BlockSealFromRef(refs.Derived), + Source: BlockSealFromRef(refs.Source), + Derived: BlockSealFromRef(refs.Derived), } } -// DerivedBlockSealPair is a pair of block seals, where Derived (L2) is derived from DerivedFrom (L1). +// DerivedBlockSealPair is a pair of block seals, where Derived (L2) is derived from Source (L1). type DerivedBlockSealPair struct { - DerivedFrom BlockSeal `json:"derivedFrom"` - Derived BlockSeal `json:"derived"` + Source BlockSeal `json:"source"` + Derived BlockSeal `json:"derived"` } func (seals *DerivedBlockSealPair) IDs() DerivedIDPair { return DerivedIDPair{ - DerivedFrom: seals.DerivedFrom.ID(), - Derived: seals.Derived.ID(), + Source: seals.Source.ID(), + Derived: seals.Derived.ID(), } } -// DerivedIDPair is a pair of block IDs, where Derived (L2) is derived from DerivedFrom (L1). +// DerivedIDPair is a pair of block IDs, where Derived (L2) is derived from Source (L1). type DerivedIDPair struct { - DerivedFrom eth.BlockID `json:"derivedFrom"` - Derived eth.BlockID `json:"derived"` + Source eth.BlockID `json:"source"` + Derived eth.BlockID `json:"derived"` } type BlockReplacement struct { @@ -308,9 +317,10 @@ type BlockReplacement struct { // ManagedEvent is an event sent by the managed node to the supervisor, // to share an update. One of the fields will be non-null; different kinds of updates may be sent. 
type ManagedEvent struct { - Reset *string `json:"reset,omitempty"` - UnsafeBlock *eth.BlockRef `json:"unsafeBlock,omitempty"` - DerivationUpdate *DerivedBlockRefPair `json:"derivationUpdate,omitempty"` - ExhaustL1 *DerivedBlockRefPair `json:"exhaustL1,omitempty"` - ReplaceBlock *BlockReplacement `json:"replaceBlock,omitempty"` + Reset *string `json:"reset,omitempty"` + UnsafeBlock *eth.BlockRef `json:"unsafeBlock,omitempty"` + DerivationUpdate *DerivedBlockRefPair `json:"derivationUpdate,omitempty"` + ExhaustL1 *DerivedBlockRefPair `json:"exhaustL1,omitempty"` + ReplaceBlock *BlockReplacement `json:"replaceBlock,omitempty"` + DerivationOriginUpdate *eth.BlockRef `json:"derivationOriginUpdate,omitempty"` } diff --git a/packages/contracts-bedrock/deploy-config/devnetL1-template.json b/packages/contracts-bedrock/deploy-config/devnetL1-template.json deleted file mode 100644 index 2f275f46c4f..00000000000 --- a/packages/contracts-bedrock/deploy-config/devnetL1-template.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "l1ChainID": 900, - "l2ChainID": 901, - "l2BlockTime": 2, - "maxSequencerDrift": 300, - "sequencerWindowSize": 200, - "channelTimeout": 120, - "p2pSequencerAddress": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc", - "batchInboxAddress": "0x00289C189bEE4E70334629f04Cd5eD602B6600eB", - "batchSenderAddress": "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC", - "l1StartingBlockTag": "earliest", - "l2OutputOracleSubmissionInterval": 10, - "l2OutputOracleStartingTimestamp": 0, - "l2OutputOracleStartingBlockNumber": 0, - "l2OutputOracleProposer": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", - "l2OutputOracleChallenger": "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65", - "l2GenesisBlockGasLimit": "0x1c9c380", - "l1BlockTime": 6, - "baseFeeVaultRecipient": "0x14dC79964da2C08b23698B3D3cc7Ca32193d9955", - "l1FeeVaultRecipient": "0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f", - "sequencerFeeVaultRecipient": "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720", - 
"baseFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", - "l1FeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", - "sequencerFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", - "baseFeeVaultWithdrawalNetwork": 0, - "l1FeeVaultWithdrawalNetwork": 0, - "sequencerFeeVaultWithdrawalNetwork": 0, - "proxyAdminOwner": "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720", - "finalSystemOwner": "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720", - "superchainConfigGuardian": "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720", - "finalizationPeriodSeconds": 2, - "fundDevAccounts": true, - "l2GenesisBlockBaseFeePerGas": "0x1", - "gasPriceOracleOverhead": 2100, - "gasPriceOracleScalar": 1000000, - "gasPriceOracleBaseFeeScalar": 1368, - "gasPriceOracleBlobBaseFeeScalar": 810949, - "enableGovernance": true, - "governanceTokenSymbol": "OP", - "governanceTokenName": "Optimism", - "governanceTokenOwner": "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720", - "eip1559Denominator": 50, - "eip1559DenominatorCanyon": 250, - "eip1559Elasticity": 6, - "l1GenesisBlockTimestamp": "0x123", - "l2GenesisRegolithTimeOffset": "0x0", - "l2GenesisCanyonTimeOffset": "0x0", - "l2GenesisDeltaTimeOffset": "0x0", - "l2GenesisEcotoneTimeOffset": "0x0", - "l2GenesisFjordTimeOffset": "0x0", - "l2GenesisGraniteTimeOffset": "0x0", - "l1CancunTimeOffset": "0x0", - "systemConfigStartBlock": 0, - "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000001", - "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000001", - "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98", - "faultGameMaxDepth": 50, - "faultGameClockExtension": 0, - "faultGameMaxClockDuration": 1200, - "faultGameGenesisBlock": 0, - "faultGameGenesisOutputRoot": "0xDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF", - "faultGameSplitDepth": 14, - "faultGameWithdrawalDelay": 604800, - "preimageOracleMinProposalSize": 10000, 
- "preimageOracleChallengePeriod": 120, - "proofMaturityDelaySeconds": 12, - "disputeGameFinalityDelaySeconds": 6, - "respectedGameType": 254, - "useFaultProofs": true, - "useAltDA": false, - "daCommitmentType": "KeccakCommitment", - "daChallengeWindow": 16, - "daResolveWindow": 16, - "daBondSize": 1000000, - "daResolverRefundPercentage": 0 -} diff --git a/packages/contracts-bedrock/deployments/4202/.chainId b/packages/contracts-bedrock/deployments/4202/.chainId deleted file mode 100644 index edfd6052cc7..00000000000 --- a/packages/contracts-bedrock/deployments/4202/.chainId +++ /dev/null @@ -1 +0,0 @@ -4202 \ No newline at end of file diff --git a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol b/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol index fca2b7df456..b6e5a82ece2 100644 --- a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol +++ b/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol @@ -112,6 +112,7 @@ interface IOPContractsManager { struct OpChainConfig { ISystemConfig systemConfigProxy; IProxyAdmin proxyAdmin; + Claim absolutePrestate; } struct AddGameInput { @@ -209,6 +210,8 @@ interface IOPContractsManager { error SuperchainProxyAdminMismatch(); + error PrestateNotSet(); + // -------- Methods -------- function __constructor__( diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 955b8cb4c8e..24d07231e06 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -141,7 +141,7 @@ coverage-lcov-upgrade *ARGS: build-go-ffi # Runs coverage-lcov and coverage-lcov-upgrade and merges their output files info one file coverage-lcov-all *ARGS: just coverage-lcov {{ARGS}} && \ - just coverage-lcov-upgrade {{ARGS}} && \ + just coverage-lcov-upgrade --match-contract OPContractsManager_Upgrade_Test {{ARGS}} && \ lcov -a lcov.info -a lcov-upgrade.info -o lcov-all.info ######################################################## @@ 
-161,13 +161,6 @@ deploy: # SNAPSHOTS # ######################################################## -# Generates a gas snapshot without building. -gas-snapshot-no-build: - forge snapshot --match-contract GasBenchMark --snap snapshots/.gas-snapshot - -# Generates a gas snapshot. -gas-snapshot: build-go-ffi gas-snapshot-no-build - # Generates default Kontrol summary. kontrol-summary: ./test/kontrol/scripts/make-summary-deployment.sh @@ -194,7 +187,7 @@ semver-lock-no-build: semver-lock: build-source semver-lock-no-build # Generates core snapshots without building contracts. -snapshots-no-build: snapshots-abi-storage-no-build semver-lock-no-build gas-snapshot-no-build +snapshots-no-build: snapshots-abi-storage-no-build semver-lock-no-build # Builds contracts and then generates core snapshots. snapshots: build-source snapshots-no-build @@ -204,13 +197,6 @@ snapshots: build-source snapshots-no-build # CHECKS # ######################################################## -# Checks that the gas snapshot is up to date without building. -gas-snapshot-check-no-build: - forge snapshot --match-contract GasBenchMark --snap snapshots/.gas-snapshot --check - -# Checks that the gas snapshot is up to date. -gas-snapshot-check: build-go-ffi gas-snapshot-check-no-build - # Checks if the snapshots are up to date without building. 
snapshots-check-no-build: just snapshots-no-build && git diff --exit-code snapshots diff --git a/packages/contracts-bedrock/lib/forge-std b/packages/contracts-bedrock/lib/forge-std index 8f24d6b04c9..3b20d60d14b 160000 --- a/packages/contracts-bedrock/lib/forge-std +++ b/packages/contracts-bedrock/lib/forge-std @@ -1 +1 @@ -Subproject commit 8f24d6b04c92975e0795b5868aa0d783251cdeaa +Subproject commit 3b20d60d14b343ee4f908cb8079495c07f5e8981 diff --git a/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol b/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol index 1327f1f392f..b897e276098 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol @@ -583,44 +583,44 @@ contract DeploySuperchainInterop is DeploySuperchain { /// @notice This is a copy of the `computeCreateAddress` function from `CreateX.sol`. /// This is needed because the `computeCreateAddress` function is not available in go cheatcodes. /// TODO: Remove this function once we have `vm.computeCreateAddress` cheatcode in go. - function _computeCreateAddress(address deployer, uint256 nonce) private pure returns (address computedAddress) { + function _computeCreateAddress(address _deployer, uint256 _nonce) private pure returns (address computedAddress_) { bytes memory data; bytes1 len = bytes1(0x94); // The integer zero is treated as an empty byte string and therefore has only one length prefix, // 0x80, which is calculated via 0x80 + 0. - if (nonce == 0x00) { - data = abi.encodePacked(bytes1(0xd6), len, deployer, bytes1(0x80)); + if (_nonce == 0x00) { + data = abi.encodePacked(bytes1(0xd6), len, _deployer, bytes1(0x80)); } // A one-byte integer in the [0x00, 0x7f] range uses its own value as a length prefix, there is no // additional "0x80 + length" prefix that precedes it. 
- else if (nonce <= 0x7f) { - data = abi.encodePacked(bytes1(0xd6), len, deployer, uint8(nonce)); + else if (_nonce <= 0x7f) { + data = abi.encodePacked(bytes1(0xd6), len, _deployer, uint8(_nonce)); } // In the case of `nonce > 0x7f` and `nonce <= type(uint8).max`, we have the following encoding scheme // (the same calculation can be carried over for higher nonce bytes): // 0xda = 0xc0 (short RLP prefix) + 0x1a (= the bytes length of: 0x94 + address + 0x84 + nonce, in hex), // 0x94 = 0x80 + 0x14 (= the bytes length of an address, 20 bytes, in hex), // 0x84 = 0x80 + 0x04 (= the bytes length of the nonce, 4 bytes, in hex). - else if (nonce <= type(uint8).max) { - data = abi.encodePacked(bytes1(0xd7), len, deployer, bytes1(0x81), uint8(nonce)); - } else if (nonce <= type(uint16).max) { - data = abi.encodePacked(bytes1(0xd8), len, deployer, bytes1(0x82), uint16(nonce)); - } else if (nonce <= type(uint24).max) { - data = abi.encodePacked(bytes1(0xd9), len, deployer, bytes1(0x83), uint24(nonce)); - } else if (nonce <= type(uint32).max) { - data = abi.encodePacked(bytes1(0xda), len, deployer, bytes1(0x84), uint32(nonce)); - } else if (nonce <= type(uint40).max) { - data = abi.encodePacked(bytes1(0xdb), len, deployer, bytes1(0x85), uint40(nonce)); - } else if (nonce <= type(uint48).max) { - data = abi.encodePacked(bytes1(0xdc), len, deployer, bytes1(0x86), uint48(nonce)); - } else if (nonce <= type(uint56).max) { - data = abi.encodePacked(bytes1(0xdd), len, deployer, bytes1(0x87), uint56(nonce)); + else if (_nonce <= type(uint8).max) { + data = abi.encodePacked(bytes1(0xd7), len, _deployer, bytes1(0x81), uint8(_nonce)); + } else if (_nonce <= type(uint16).max) { + data = abi.encodePacked(bytes1(0xd8), len, _deployer, bytes1(0x82), uint16(_nonce)); + } else if (_nonce <= type(uint24).max) { + data = abi.encodePacked(bytes1(0xd9), len, _deployer, bytes1(0x83), uint24(_nonce)); + } else if (_nonce <= type(uint32).max) { + data = abi.encodePacked(bytes1(0xda), len, _deployer, 
bytes1(0x84), uint32(_nonce)); + } else if (_nonce <= type(uint40).max) { + data = abi.encodePacked(bytes1(0xdb), len, _deployer, bytes1(0x85), uint40(_nonce)); + } else if (_nonce <= type(uint48).max) { + data = abi.encodePacked(bytes1(0xdc), len, _deployer, bytes1(0x86), uint48(_nonce)); + } else if (_nonce <= type(uint56).max) { + data = abi.encodePacked(bytes1(0xdd), len, _deployer, bytes1(0x87), uint56(_nonce)); } else { - data = abi.encodePacked(bytes1(0xde), len, deployer, bytes1(0x88), uint64(nonce)); + data = abi.encodePacked(bytes1(0xde), len, _deployer, bytes1(0x88), uint64(_nonce)); } - computedAddress = address(uint160(uint256(keccak256(data)))); + computedAddress_ = address(uint160(uint256(keccak256(data)))); } function deploySuperchainImplementationContracts( diff --git a/packages/contracts-bedrock/snapshots/.gas-snapshot b/packages/contracts-bedrock/snapshots/.gas-snapshot deleted file mode 100644 index febca5a38f4..00000000000 --- a/packages/contracts-bedrock/snapshots/.gas-snapshot +++ /dev/null @@ -1,13 +0,0 @@ -GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7589) -GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5589) -GasBenchMark_L1BlockInterop_SetValuesInterop:test_setL1BlockValuesInterop_benchmark() (gas: 175700) -GasBenchMark_L1BlockInterop_SetValuesInterop_Warm:test_setL1BlockValuesInterop_benchmark() (gas: 5144) -GasBenchMark_L1Block_SetValuesEcotone:test_setL1BlockValuesEcotone_benchmark() (gas: 158509) -GasBenchMark_L1Block_SetValuesEcotone_Warm:test_setL1BlockValuesEcotone_benchmark() (gas: 7619) -GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 356475) -GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2954682) -GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 551615) -GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4063763) 
-GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 450255) -GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3496176) -GasBenchMark_L1StandardBridge_Finalize:test_finalizeETHWithdrawal_benchmark() (gas: 59795) \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 1356470d607..2d622fb7cd7 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -682,6 +682,11 @@ "internalType": "contract IProxyAdmin", "name": "proxyAdmin", "type": "address" + }, + { + "internalType": "Claim", + "name": "absolutePrestate", + "type": "bytes32" } ], "internalType": "struct OPContractsManager.OpChainConfig[]", @@ -863,6 +868,11 @@ "name": "OnlyUpgradeController", "type": "error" }, + { + "inputs": [], + "name": "PrestateNotSet", + "type": "error" + }, { "inputs": [], "name": "ReservedBitsSet", diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index 549a827d4ad..c4aa01b1b58 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -16,8 +16,8 @@ "sourceCodeHash": "0xf9ba98657dc235355146e381b654fe3ed766feb7cd87636ec0c9d4c6dd3e1973" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0xee8025c5f74ce7d9da5875b5f5b97106af0a307f366cc9559ec27174b3e3d0bc", - "sourceCodeHash": "0x1c37bb41a46a1c0fe6c8cef88037ecc6700675c4cfa8d673349f026ffd919526" + "initCodeHash": "0x89331280649debc424f9dd9f8a0afa877fd5ad122482e8dace21abde2929698d", + "sourceCodeHash": "0xd9e68a8adc202cc6d794fd3c9812013eb97e43a1cad661a55c9030d8b68e0025" }, "src/L1/OptimismPortal2.sol": { "initCodeHash": "0x7488f3311a94278272a5b642d9abd90bb68d72230d5dc3b82fb8c10ddfc2829b", diff --git 
a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index d9ca5efaf22..fc3ad743025 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -118,6 +118,7 @@ contract OPContractsManager is ISemver { struct OpChainConfig { ISystemConfig systemConfigProxy; IProxyAdmin proxyAdmin; + Claim absolutePrestate; } struct AddGameInput { @@ -143,9 +144,9 @@ contract OPContractsManager is ISemver { // -------- Constants and Variables -------- - /// @custom:semver 1.0.1 + /// @custom:semver 1.1.0 function version() public pure virtual returns (string memory) { - return "1.0.1"; + return "1.1.0"; } /// @notice Address of the SuperchainConfig contract shared by all chains. @@ -235,6 +236,9 @@ contract OPContractsManager is ISemver { /// @notice Thrown when the SuperchainProxyAdmin does not match the SuperchainConfig's admin. error SuperchainProxyAdminMismatch(); + /// @notice Thrown when a prestate is not set for a game. + error PrestateNotSet(); + // -------- Methods -------- constructor( @@ -520,11 +524,15 @@ contract OPContractsManager is ISemver { IAnchorStateRegistry newAnchorStateRegistryProxy; { // Deploy a new proxy, because we're replacing the old one. + // Include the system config address in the salt to ensure that the new proxy is unique, + // even if another chains with the same L2 chain ID has been deployed by this contract. newAnchorStateRegistryProxy = IAnchorStateRegistry( deployProxy({ _l2ChainId: l2ChainId, _proxyAdmin: _opChainConfigs[i].proxyAdmin, - _saltMixer: "v2.0.0", + _saltMixer: string.concat( + "v2.0.0-", string(bytes.concat(bytes20(address(_opChainConfigs[i].systemConfigProxy)))) + ), _contractName: "AnchorStateRegistry" }) ); @@ -1105,6 +1113,10 @@ contract OPContractsManager is ISemver { // Modify the params with the new anchorStateRegistry and vm values. 
params.anchorStateRegistry = IAnchorStateRegistry(address(_newAnchorStateRegistryProxy)); params.vm = IBigStepper(_implementations.mipsImpl); + if (Claim.unwrap(_opChainConfig.absolutePrestate) == bytes32(0)) { + revert PrestateNotSet(); + } + params.absolutePrestate = _opChainConfig.absolutePrestate; IDisputeGame newGame; if (GameType.unwrap(_gameType) == GameType.unwrap(GameTypes.PERMISSIONED_CANNON)) { diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 26c7c9ddd62..95a25e2904d 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -10,6 +10,7 @@ import { DelegateCaller } from "test/mocks/Callers.sol"; // Scripts import { DeployOPChainInput } from "scripts/deploy/DeployOPChain.s.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; +import { Deploy } from "scripts/deploy/Deploy.s.sol"; // Libraries import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; @@ -212,6 +213,7 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { IProxyAdmin superchainProxyAdmin; address upgrader; IOPContractsManager.OpChainConfig[] opChainConfigs; + Claim absolutePrestate; function setUp() public virtual override { super.disableUpgradedFork(); @@ -225,6 +227,7 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { "OPContractsManager_Upgrade_Harness: cannot test upgrade on superchain ops repo upgrade tests" ); + absolutePrestate = Claim.wrap(bytes32(keccak256("absolutePrestate"))); proxyAdmin = IProxyAdmin(EIP1967Helper.getAdmin(address(systemConfig))); superchainProxyAdmin = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))); upgrader = proxyAdmin.owner(); @@ -234,7 +237,11 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { vm.etch(upgrader, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); opChainConfigs.push( - 
IOPContractsManager.OpChainConfig({ systemConfigProxy: systemConfig, proxyAdmin: proxyAdmin }) + IOPContractsManager.OpChainConfig({ + systemConfigProxy: systemConfig, + proxyAdmin: proxyAdmin, + absolutePrestate: absolutePrestate + }) ); // Retrieve the l2ChainId, which was read from the superchain-registry, and saved in Artifacts @@ -254,15 +261,19 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { } function runUpgradeTestAndChecks(address _delegateCaller) public { - vm.etch(_delegateCaller, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); - IOPContractsManager.Implementations memory impls = opcm.implementations(); // Cache the old L1xDM address so we can look for it in the AddressManager's event address oldL1CrossDomainMessenger = addressManager.getAddress("OVM_L1CrossDomainMessenger"); // Predict the address of the new AnchorStateRegistry proxy - bytes32 salt = keccak256(abi.encode(l2ChainId, "v2.0.0", "AnchorStateRegistry")); + bytes32 salt = keccak256( + abi.encode( + l2ChainId, + string.concat("v2.0.0-", string(bytes.concat(bytes20(address(opChainConfigs[0].systemConfigProxy))))), + "AnchorStateRegistry" + ) + ); bytes memory initCode = bytes.concat(vm.getCode("Proxy"), abi.encode(proxyAdmin)); address newAnchorStateRegistryProxy = vm.computeCreate2Address(salt, keccak256(initCode), _delegateCaller); vm.label(newAnchorStateRegistryProxy, "NewAnchorStateRegistryProxy"); @@ -295,9 +306,14 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { vm.expectEmit(address(_delegateCaller)); emit Upgraded(l2ChainId, opChainConfigs[0].systemConfigProxy, address(_delegateCaller)); + // Temporarily replace the upgrader with a DelegateCaller so we can test the upgrade, + // then reset its code to the original code. 
+ bytes memory delegateCallerCode = address(_delegateCaller).code; + vm.etch(_delegateCaller, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); DelegateCaller(_delegateCaller).dcForward( address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs)) ); + vm.etch(_delegateCaller, delegateCallerCode); // Check the implementations of the core addresses assertEq(impls.systemConfigImpl, EIP1967Helper.getImplementation(address(systemConfig))); @@ -397,6 +413,18 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { // Run the upgrade test and checks runUpgradeTestAndChecks(_nonUpgradeController); } + + function test_upgrade_duplicateL2ChainId_succeeds() public { + // Deploy a new OPChain with the same L2 chain ID as the current OPChain + Deploy deploy = Deploy(address(uint160(uint256(keccak256(abi.encode("optimism.deploy")))))); + IOPContractsManager.DeployInput memory deployInput = deploy.getDeployInput(); + deployInput.l2ChainId = l2ChainId; + deployInput.saltMixer = "v2.0.0"; + opcm.deploy(deployInput); + + // Try to upgrade the current OPChain + runUpgradeTestAndChecks(upgrader); + } } contract OPContractsManager_Upgrade_TestFails is OPContractsManager_Upgrade_Harness { @@ -446,6 +474,12 @@ contract OPContractsManager_Upgrade_TestFails is OPContractsManager_Upgrade_Harn address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs)) ); } + + function test_upgrade_absolutePrestateNotSet_reverts() public { + opChainConfigs[0].absolutePrestate = Claim.wrap(bytes32(0)); + vm.expectRevert(IOPContractsManager.PrestateNotSet.selector); + DelegateCaller(upgrader).dcForward(address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs))); + } } contract OPContractsManager_SetRC_Test is OPContractsManager_Upgrade_Harness { @@ -467,6 +501,7 @@ contract OPContractsManager_SetRC_Test is OPContractsManager_Upgrade_Harness { /// @notice Tests the setRC function can not be set by non-upgrade controller. 
function test_setRC_nonUpgradeController_reverts(address _nonUpgradeController) public { + // Disallow the upgrade controller to have code, or be a 'special' address. if ( _nonUpgradeController == upgrader || _nonUpgradeController == address(0) || _nonUpgradeController < address(0x4200000000000000000000000000000000000000) @@ -474,6 +509,7 @@ contract OPContractsManager_SetRC_Test is OPContractsManager_Upgrade_Harness { || _nonUpgradeController == address(vm) || _nonUpgradeController == 0x000000000000000000636F6e736F6c652e6c6f67 || _nonUpgradeController == 0x4e59b44847b379578588920cA78FbF26c0B4956C + || _nonUpgradeController.code.length > 0 ) { _nonUpgradeController = makeAddr("nonUpgradeController"); } diff --git a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol index 768c61a6b5c..e68b5e2276d 100644 --- a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol +++ b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol @@ -250,8 +250,11 @@ contract DisputeGameFactory_FindLatestGames_Test is DisputeGameFactory_Init { /// @dev Tests that `findLatestGames` returns an empty array when the passed starting index is greater than or equal /// to the game count. function testFuzz_findLatestGames_greaterThanLength_succeeds(uint256 _start) public { + // Creation count should be 32 for normal tests, 5 for upgrade tests. + uint256 creationCount = isForkTest() ? 5 : 32; + // Create some dispute games of varying game types. - for (uint256 i; i < 1 << 5; i++) { + for (uint256 i; i < creationCount; i++) { disputeGameFactory.create(GameType.wrap(uint8(i % 2)), Claim.wrap(bytes32(i)), abi.encode(i)); } @@ -267,9 +270,11 @@ contract DisputeGameFactory_FindLatestGames_Test is DisputeGameFactory_Init { /// @dev Tests that `findLatestGames` returns the correct games. 
function test_findLatestGames_static_succeeds() public { + // Creation count should be 32 for normal tests, 5 for upgrade tests. + uint256 creationCount = isForkTest() ? 5 : 32; + // Create some dispute games of varying game types, repeatedly iterating over the game types 0, 1, 2. - // 1 << 5 = 32, resulting in the final three games added being ordered 2, 0, 1. - for (uint256 i; i < 1 << 5; i++) { + for (uint256 i; i < creationCount; i++) { disputeGameFactory.create(GameType.wrap(uint8(i % 3)), Claim.wrap(bytes32(i)), abi.encode(i)); } @@ -282,6 +287,7 @@ contract DisputeGameFactory_FindLatestGames_Test is DisputeGameFactory_Init { // Find type 1 games. games = disputeGameFactory.findLatestGames(GameType.wrap(1), start, 1); assertEq(games.length, 1); + // The type 1 game should be the last one added. assertEq(games[0].index, start); (GameType gameType, Timestamp createdAt, address game) = games[0].metadata.unpack(); @@ -291,6 +297,7 @@ contract DisputeGameFactory_FindLatestGames_Test is DisputeGameFactory_Init { // Find type 0 games. games = disputeGameFactory.findLatestGames(GameType.wrap(0), start, 1); assertEq(games.length, 1); + // The type 0 game should be the second to last one added. assertEq(games[0].index, start - 1); (gameType, createdAt, game) = games[0].metadata.unpack(); @@ -300,6 +307,7 @@ contract DisputeGameFactory_FindLatestGames_Test is DisputeGameFactory_Init { // Find type 2 games. games = disputeGameFactory.findLatestGames(GameType.wrap(2), start, 1); assertEq(games.length, 1); + // The type 2 game should be the third to last one added. assertEq(games[0].index, start - 2); (gameType, createdAt, game) = games[0].metadata.unpack(); @@ -341,7 +349,7 @@ contract DisputeGameFactory_FindLatestGames_Test is DisputeGameFactory_Init { ) public { - _numGames = bound(_numGames, 0, 1 << 8); + _numGames = bound(_numGames, 0, isForkTest() ? 
5 : 256); _numSearchedGames = bound(_numSearchedGames, 0, _numGames); _n = bound(_n, 0, _numSearchedGames); diff --git a/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol b/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol index d9ddd6c16f2..06e20c078c1 100644 --- a/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol @@ -2,6 +2,7 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; +import { Claim } from "src/dispute/lib/Types.sol"; import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; @@ -53,7 +54,8 @@ contract UpgradeOPChainInput_Test is Test { configs[0] = OPContractsManager.OpChainConfig({ systemConfigProxy: ISystemConfig(systemConfig1), - proxyAdmin: IProxyAdmin(proxyAdmin1) + proxyAdmin: IProxyAdmin(proxyAdmin1), + absolutePrestate: Claim.wrap(bytes32(uint256(1))) }); // Setup mock addresses and contracts for second config @@ -64,13 +66,20 @@ contract UpgradeOPChainInput_Test is Test { configs[1] = OPContractsManager.OpChainConfig({ systemConfigProxy: ISystemConfig(systemConfig2), - proxyAdmin: IProxyAdmin(proxyAdmin2) + proxyAdmin: IProxyAdmin(proxyAdmin2), + absolutePrestate: Claim.wrap(bytes32(uint256(2))) }); input.set(input.opChainConfigs.selector, configs); bytes memory storedConfigs = input.opChainConfigs(); assertEq(storedConfigs, abi.encode(configs)); + + // Additional verification of stored claims if needed + OPContractsManager.OpChainConfig[] memory decodedConfigs = + abi.decode(storedConfigs, (OPContractsManager.OpChainConfig[])); + assertEq(Claim.unwrap(decodedConfigs[0].absolutePrestate), bytes32(uint256(1))); + assertEq(Claim.unwrap(decodedConfigs[1].absolutePrestate), bytes32(uint256(2))); } function test_setAddress_withZeroAddress_reverts() public { @@ -101,7 +110,8 @@ contract UpgradeOPChainInput_Test is Test { configs[0] = OPContractsManager.OpChainConfig({ 
systemConfigProxy: ISystemConfig(mockSystemConfig), - proxyAdmin: IProxyAdmin(mockProxyAdmin) + proxyAdmin: IProxyAdmin(mockProxyAdmin), + absolutePrestate: Claim.wrap(bytes32(uint256(1))) }); vm.expectRevert("UpgradeOPCMInput: unknown selector"); @@ -110,10 +120,14 @@ contract UpgradeOPChainInput_Test is Test { } contract MockOPCM { - event UpgradeCalled(address indexed sysCfgProxy, address indexed proxyAdmin); + event UpgradeCalled(address indexed sysCfgProxy, address indexed proxyAdmin, bytes32 indexed absolutePrestate); function upgrade(OPContractsManager.OpChainConfig[] memory _opChainConfigs) public { - emit UpgradeCalled(address(_opChainConfigs[0].systemConfigProxy), address(_opChainConfigs[0].proxyAdmin)); + emit UpgradeCalled( + address(_opChainConfigs[0].systemConfigProxy), + address(_opChainConfigs[0].proxyAdmin), + Claim.unwrap(_opChainConfigs[0].absolutePrestate) + ); } } @@ -124,7 +138,7 @@ contract UpgradeOPChain_Test is Test { UpgradeOPChain upgradeOPChain; address prank; - event UpgradeCalled(address indexed sysCfgProxy, address indexed proxyAdmin); + event UpgradeCalled(address indexed sysCfgProxy, address indexed proxyAdmin, bytes32 indexed absolutePrestate); function setUp() public virtual { mockOPCM = new MockOPCM(); @@ -132,7 +146,8 @@ contract UpgradeOPChain_Test is Test { uoci.set(uoci.opcm.selector, address(mockOPCM)); config = OPContractsManager.OpChainConfig({ systemConfigProxy: ISystemConfig(makeAddr("systemConfigProxy")), - proxyAdmin: IProxyAdmin(makeAddr("proxyAdmin")) + proxyAdmin: IProxyAdmin(makeAddr("proxyAdmin")), + absolutePrestate: Claim.wrap(keccak256("absolutePrestate")) }); OPContractsManager.OpChainConfig[] memory configs = new OPContractsManager.OpChainConfig[](1); configs[0] = config; @@ -144,8 +159,10 @@ contract UpgradeOPChain_Test is Test { function test_upgrade_succeeds() public { // UpgradeCalled should be emitted by the prank since it's a delegate call. 
- vm.expectEmit(true, true, false, false, address(prank)); - emit UpgradeCalled(address(config.systemConfigProxy), address(config.proxyAdmin)); + vm.expectEmit(address(prank)); + emit UpgradeCalled( + address(config.systemConfigProxy), address(config.proxyAdmin), Claim.unwrap(config.absolutePrestate) + ); upgradeOPChain.run(uoci); } } diff --git a/packages/contracts-bedrock/test/setup/ForkLive.s.sol b/packages/contracts-bedrock/test/setup/ForkLive.s.sol index 61b9e74f0fb..45c31273428 100644 --- a/packages/contracts-bedrock/test/setup/ForkLive.s.sol +++ b/packages/contracts-bedrock/test/setup/ForkLive.s.sol @@ -12,7 +12,7 @@ import { Deployer } from "scripts/deploy/Deployer.sol"; import { Deploy } from "scripts/deploy/Deploy.s.sol"; // Libraries -import { GameTypes } from "src/dispute/lib/Types.sol"; +import { GameTypes, Claim } from "src/dispute/lib/Types.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Interfaces @@ -171,12 +171,18 @@ contract ForkLive is Deployer { vm.label(upgrader, "ProxyAdmin Owner"); IOPContractsManager.OpChainConfig[] memory opChains = new IOPContractsManager.OpChainConfig[](1); - opChains[0] = IOPContractsManager.OpChainConfig({ systemConfigProxy: systemConfig, proxyAdmin: proxyAdmin }); - - // TODO Migrate from DelegateCaller to a Safe to reduce risk of mocks not properly - // reflecting the production system. + opChains[0] = IOPContractsManager.OpChainConfig({ + systemConfigProxy: systemConfig, + proxyAdmin: proxyAdmin, + absolutePrestate: Claim.wrap(bytes32(keccak256("absolutePrestate"))) + }); + + // Temporarily replace the upgrader with a DelegateCaller so we can test the upgrade, + // then reset its code to the original code. 
+ bytes memory upgraderCode = address(upgrader).code; vm.etch(upgrader, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); DelegateCaller(upgrader).dcForward(address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChains))); + vm.etch(upgrader, upgraderCode); console.log("ForkLive: Saving newly deployed contracts"); // A new ASR and new dispute games were deployed, so we need to update them diff --git a/packages/contracts-bedrock/test/setup/Setup.sol b/packages/contracts-bedrock/test/setup/Setup.sol index 7a79fba6d2d..4cc80f6ac50 100644 --- a/packages/contracts-bedrock/test/setup/Setup.sol +++ b/packages/contracts-bedrock/test/setup/Setup.sol @@ -3,7 +3,7 @@ pragma solidity 0.8.15; // Testing import { console2 as console } from "forge-std/console2.sol"; -import { Vm } from "forge-std/Vm.sol"; +import { Vm, VmSafe } from "forge-std/Vm.sol"; // Scripts import { Deploy } from "scripts/deploy/Deploy.s.sol"; @@ -179,6 +179,13 @@ contract Setup { console.log("Setup: L2 setup done!"); } + /// @dev Skips tests when running in coverage mode. + function skipIfCoverage() public { + if (vm.isContext(VmSafe.ForgeContext.Coverage)) { + vm.skip(true); + } + } + /// @dev Skips tests when running against a forked production network. 
function skipIfForkTest(string memory message) public { if (isForkTest()) { diff --git a/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol b/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol index ae283ef8dba..0c82feda3b5 100644 --- a/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol +++ b/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol @@ -1,18 +1,19 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Testing +// Forge import { Vm } from "forge-std/Vm.sol"; + +// Testing import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries import { SafeCall } from "src/libraries/SafeCall.sol"; -import { IL1BlockInterop } from "interfaces/L2/IL1BlockInterop.sol"; import { Encoding } from "src/libraries/Encoding.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; // Interfaces -import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; +import { IL1BlockInterop } from "interfaces/L2/IL1BlockInterop.sol"; // Free function for setting the prevBaseFee param in the OptimismPortal. function setPrevBaseFee(Vm _vm, address _op, uint128 _prevBaseFee) { @@ -28,105 +29,17 @@ contract SetPrevBaseFee_Test is CommonTest { } } -contract GasBenchMark_L1CrossDomainMessenger is CommonTest { - function test_sendMessage_benchmark_0() external { - vm.pauseGasMetering(); - setPrevBaseFee(vm, address(optimismPortal2), 1 gwei); - // The amount of data typically sent during a bridge deposit. 
- bytes memory data = - hex"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; - vm.resumeGasMetering(); - l1CrossDomainMessenger.sendMessage(bob, data, uint32(100)); - } - - function test_sendMessage_benchmark_1() external { - vm.pauseGasMetering(); - setPrevBaseFee(vm, address(optimismPortal2), 10 gwei); - // The amount of data typically sent during a bridge deposit. - bytes memory data = - hex"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"; - vm.resumeGasMetering(); - l1CrossDomainMessenger.sendMessage(bob, data, uint32(100)); - } -} - -contract GasBenchMark_L1StandardBridge_Deposit is CommonTest { - function setUp() public virtual override { - super.setUp(); - deal(address(L1Token), alice, 100000, true); - vm.startPrank(alice, alice); - L1Token.approve(address(l1StandardBridge), type(uint256).max); - } - - function test_depositETH_benchmark_0() external { - vm.pauseGasMetering(); - setPrevBaseFee(vm, address(optimismPortal2), 1 gwei); - vm.resumeGasMetering(); - l1StandardBridge.depositETH{ value: 500 }(50000, hex""); - } - - function test_depositETH_benchmark_1() external { - vm.pauseGasMetering(); - setPrevBaseFee(vm, address(optimismPortal2), 10 gwei); - 
vm.resumeGasMetering(); - l1StandardBridge.depositETH{ value: 500 }(50000, hex""); - } - - function test_depositERC20_benchmark_0() external { - vm.pauseGasMetering(); - setPrevBaseFee(vm, address(optimismPortal2), 1 gwei); - vm.resumeGasMetering(); - l1StandardBridge.bridgeERC20({ - _localToken: address(L1Token), - _remoteToken: address(L2Token), - _amount: 100, - _minGasLimit: 100_000, - _extraData: hex"" - }); - } - - function test_depositERC20_benchmark_1() external { - vm.pauseGasMetering(); - setPrevBaseFee(vm, address(optimismPortal2), 10 gwei); - vm.resumeGasMetering(); - l1StandardBridge.bridgeERC20({ - _localToken: address(L1Token), - _remoteToken: address(L2Token), - _amount: 100, - _minGasLimit: 100_000, - _extraData: hex"" - }); - } -} - -contract GasBenchMark_L1StandardBridge_Finalize is CommonTest { - function setUp() public virtual override { - super.setUp(); - deal(address(L1Token), address(l1StandardBridge), 100, true); - vm.mockCall( - address(l1StandardBridge.messenger()), - abi.encodeCall(ICrossDomainMessenger.xDomainMessageSender, ()), - abi.encode(address(l1StandardBridge.OTHER_BRIDGE())) - ); - vm.startPrank(address(l1StandardBridge.messenger())); - vm.deal(address(l1StandardBridge.messenger()), 100); - } - - function test_finalizeETHWithdrawal_benchmark() external { - // TODO: Make this more accurate. It is underestimating the cost because it pranks - // the call coming from the messenger, which bypasses the portal - // and oracle. - l1StandardBridge.finalizeETHWithdrawal{ value: 100 }(alice, alice, 100, hex""); - } -} - contract GasBenchMark_L1Block is CommonTest { address depositor; bytes setValuesCalldata; function setUp() public virtual override { super.setUp(); + + // Get the address of the depositor. depositor = l1Block.DEPOSITOR_ACCOUNT(); + + // Set up the calldata for setting the values. 
setValuesCalldata = Encoding.encodeSetL1BlockValuesEcotone( type(uint32).max, type(uint32).max, @@ -138,23 +51,41 @@ contract GasBenchMark_L1Block is CommonTest { keccak256(abi.encode(1)), bytes32(type(uint256).max) ); + + // Start pranking the depositor account. vm.startPrank(depositor); } } contract GasBenchMark_L1Block_SetValuesEcotone is GasBenchMark_L1Block { function test_setL1BlockValuesEcotone_benchmark() external { + // Skip if the test is running in coverage. + skipIfCoverage(); + + // Test SafeCall.call({ _target: address(l1Block), _calldata: setValuesCalldata }); + + // Assert + assertLt(vm.lastCallGas().gasTotalUsed, 160_000); } } contract GasBenchMark_L1Block_SetValuesEcotone_Warm is GasBenchMark_L1Block { - function setUp() public virtual override { + function test_setL1BlockValuesEcotone_benchmark() external { + // Skip if the test is running in coverage. + skipIfCoverage(); + + // Setup + // Trigger so storage is warm. SafeCall.call({ _target: address(l1Block), _calldata: setValuesCalldata }); - } - function test_setL1BlockValuesEcotone_benchmark() external { + // Test SafeCall.call({ _target: address(l1Block), _calldata: setValuesCalldata }); + + // Assert + // setL1BlockValuesEcotone system tx ONLY gets 1m gas. + // 200k is a safe boundary to prevent hitting the limit. + assertLt(vm.lastCallGas().gasTotalUsed, 200_000); } } @@ -163,12 +94,16 @@ contract GasBenchMark_L1BlockInterop is GasBenchMark_L1Block { function setUp() public virtual override { super.setUp(); + + // Create the L1BlockInterop contract. l1BlockInterop = IL1BlockInterop( DeployUtils.create1({ _name: "L1BlockInterop", _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1BlockInterop.__constructor__, ())) }) ); + + // Set up the calldata for setting the values. 
setValuesCalldata = Encoding.encodeSetL1BlockValuesInterop( type(uint32).max, type(uint32).max, @@ -185,40 +120,74 @@ contract GasBenchMark_L1BlockInterop is GasBenchMark_L1Block { contract GasBenchMark_L1BlockInterop_SetValuesInterop is GasBenchMark_L1BlockInterop { function test_setL1BlockValuesInterop_benchmark() external { + // Skip if the test is running in coverage. + skipIfCoverage(); + + // Test SafeCall.call({ _target: address(l1BlockInterop), _calldata: setValuesCalldata }); + + // Assert + // setL1BlockValuesInterop system tx ONLY gets 1m gas. + // 200k is a safe boundary to prevent hitting the limit. + assertLt(vm.lastCallGas().gasTotalUsed, 200_000); } } contract GasBenchMark_L1BlockInterop_SetValuesInterop_Warm is GasBenchMark_L1BlockInterop { - function setUp() public virtual override { + function test_setL1BlockValuesInterop_benchmark() external { + // Skip if the test is running in coverage. + skipIfCoverage(); + + // Setup + // Trigger so storage is warm. SafeCall.call({ _target: address(l1BlockInterop), _calldata: setValuesCalldata }); - } - function test_setL1BlockValuesInterop_benchmark() external { + // Test SafeCall.call({ _target: address(l1BlockInterop), _calldata: setValuesCalldata }); + + // Assert + // setL1BlockValuesInterop system tx ONLY gets 1m gas. + // 200k is a safe boundary to prevent hitting the limit. + assertLt(vm.lastCallGas().gasTotalUsed, 200_000); } } contract GasBenchMark_L1BlockInterop_DepositsComplete is GasBenchMark_L1BlockInterop { function test_depositsComplete_benchmark() external { + // Skip if the test is running in coverage. + skipIfCoverage(); + + // Test SafeCall.call({ _target: address(l1BlockInterop), _calldata: abi.encodeCall(IL1BlockInterop.depositsComplete, ()) }); + + // Assert + // depositsComplete system tx ONLY gets 15k gas. + // 5_000 is a safe boundary to prevent hitting the limit. 
+ assertLt(vm.lastCallGas().gasTotalUsed, 5_000); } } contract GasBenchMark_L1BlockInterop_DepositsComplete_Warm is GasBenchMark_L1BlockInterop { - function setUp() public virtual override { - super.setUp(); - // Set the isDeposit flag to true so then we can benchmark when it is reset. + function test_depositsComplete_benchmark() external { + // Skip if the test is running in coverage. + skipIfCoverage(); + + // Setup + // Trigger so storage is warm. SafeCall.call({ _target: address(l1BlockInterop), _calldata: setValuesCalldata }); - } - function test_depositsComplete_benchmark() external { + // Test SafeCall.call({ _target: address(l1BlockInterop), _calldata: abi.encodeCall(l1BlockInterop.depositsComplete, ()) }); + + // Assert + // depositsComplete system tx ONLY gets 15k gas. + // 5_000 is a safe boundary to prevent hitting the limit. + assertLt(vm.lastCallGas().gasTotalUsed, 5_000); } }