diff --git a/.circleci/config.yml b/.circleci/config.yml index fa8a8884bba..f948e751661 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,12 +1,9 @@ version: 2.1 parameters: - ci_builder_image: + default_docker_image: type: string - default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:v0.59.0 - ci_builder_rust_image: - type: string - default: us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder-rust:latest + default: cimg/base:2024.01 base_image: type: string default: default @@ -129,14 +126,6 @@ commands: # of the -j parameter, which speeds it up a lot. git submodule update --init --recursive --force -j 8 working_directory: packages/contracts-bedrock - - run: - name: Verify mise dependencies - command: | - if command -v mise &> /dev/null; then - mise install - else - echo "mise not found, skipping" - fi notify-failures-on-develop: description: "Notify Slack" @@ -172,6 +161,67 @@ commands: environment: FOUNDRY_PROFILE: ci + checkout-with-mise: + steps: + - checkout + - run: + name: Initialize mise environment + command: | + # This is used to create a per-user cache key to preserve permissions across different + # executor types. + user=$(whoami) + echo "$user" > .executor-user + echo "Set executor user to $user." + + if [[ "$user" == "root" ]]; then + # Self-hosted runners will persist this cache between runs. Cleaning it up means that we + # preserve the semantics of the cache regardless of executor type. It's also much faster + # to delete the cache and recreate it than it is to overwrite it in place. + rm -rf /data/mise-data + echo "Cleaned up cache data." + + mkdir -p /data/mise-data + echo "Created Mise data dir." + mkdir -p ~/.cache + echo "Created Mise cache dir." + else + sudo rm -rf /data/mise-data + echo "Cleaned up cache data." + sudo mkdir -p /data/mise-data + sudo chown -R "$user:$user" /data/mise-data + echo "Created Mise data dir." 
+ sudo mkdir -p ~/.cache + sudo chown -R "$user:$user" ~/.cache + echo "Created Mise cache dir." + fi + - restore_cache: + name: Restore mise cache + keys: + - mise-v5-{{ checksum ".executor-user" }}-{{ checksum "mise.toml" }} + - run: + name: Install mise + command: | + if command -v mise &> /dev/null; then + echo "mise already installed at $(command -v mise)" + else + curl https://mise.run | sh + fi + + echo "export PATH=\"$HOME/.local/bin:\$PATH\"" >> "$BASH_ENV" + echo "export MISE_DATA_DIR=/data/mise-data" >> "$BASH_ENV" + echo "export MISE_JOBS=$(nproc)" >> "$BASH_ENV" + echo "eval \"\$($HOME/.local/bin/mise activate --shims)\"" >> "$BASH_ENV" + - run: + name: Install mise deps + command: | + mise install -v -y + - save_cache: + name: Save mise cache + key: mise-v5-{{ checksum ".executor-user" }}-{{ checksum "mise.toml" }} + paths: + - /data/mise-data + + jobs: cannon-go-lint-and-test: machine: true @@ -188,7 +238,7 @@ jobs: type: integer default: 32 steps: - - checkout + - checkout-with-mise - check-changed: patterns: cannon,packages/contracts-bedrock/src/cannon,op-preimage,go.mod - attach_workspace: @@ -257,24 +307,29 @@ jobs: mentions: "@proofs-team" cannon-build-test-vectors: - docker: - - image: <> - resource_class: medium + machine: true + resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise - check-changed: patterns: cannon/mipsevm/tests/open_mips_tests/test + - run: + name: Install dependencies + command: | + sudo apt-get update + sudo apt-get install -y binutils-mips-linux-gnu + pip install capstone pyelftools - run: name: Build MIPS test vectors - command: python3 maketests.py && git diff --exit-code + command: | + python3 maketests.py && git diff --exit-code working_directory: cannon/mipsevm/tests/open_mips_tests diff-asterisc-bytecode: - docker: - - image: <> - resource_class: medium + machine: true + resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise - run: name: Check 
`RISCV.sol` bytecode working_directory: packages/contracts-bedrock @@ -322,7 +377,7 @@ jobs: type: string default: ci steps: - - checkout + - checkout-with-mise - install-contracts-dependencies - run: name: Print forge version @@ -349,10 +404,10 @@ jobs: check-kontrol-build: docker: - - image: <> + - image: <> resource_class: xlarge steps: - - checkout + - checkout-with-mise - attach_workspace: { at: "." } - install-contracts-dependencies - check-changed: @@ -413,7 +468,7 @@ jobs: resource_class: "<>" docker_layer_caching: true # we rely on this for faster builds, and actively warm it up for builds with common stages steps: - - checkout + - checkout-with-mise - attach_workspace: at: /tmp/docker_images - run: @@ -564,7 +619,7 @@ jobs: # Verify newly published images (built on AMD machine) will run on ARM check-cross-platform: docker: - - image: cimg/base:current + - image: <> resource_class: arm.medium parameters: registry: @@ -606,6 +661,20 @@ jobs: docker pull $image_name || exit 1 docker run $image_name <> --version || exit 1 + contracts-bedrock-frozen-code: + machine: true + resource_class: ethereum-optimism/latitude-1 + steps: + - checkout-with-mise + - attach_workspace: { at: "." } + - install-contracts-dependencies + - check-changed: + patterns: contracts-bedrock + - run: + name: Check frozen files + command: just check-frozen-code + working_directory: packages/contracts-bedrock + contracts-bedrock-tests: machine: true resource_class: ethereum-optimism/latitude-1 @@ -630,7 +699,7 @@ jobs: type: string default: ci steps: - - checkout + - checkout-with-mise - attach_workspace: { at: "." } - install-contracts-dependencies - run: @@ -707,7 +776,7 @@ jobs: type: string default: ci steps: - - checkout + - checkout-with-mise - attach_workspace: { at: "." } - install-contracts-dependencies - check-changed: @@ -755,7 +824,7 @@ jobs: machine: true resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise - attach_workspace: { at: "." 
} - install-contracts-dependencies - check-changed: @@ -786,6 +855,8 @@ jobs: name: Run tests command: just test-upgrade environment: + FOUNDRY_FUZZ_SEED: 42424242 + FOUNDRY_FUZZ_RUNS: 1 FOUNDRY_PROFILE: ci ETH_RPC_URL: https://ci-mainnet-l1-archive.optimism.io working_directory: packages/contracts-bedrock @@ -794,6 +865,8 @@ jobs: name: Print failed test traces command: just test-upgrade-rerun environment: + FOUNDRY_FUZZ_SEED: 42424242 + FOUNDRY_FUZZ_RUNS: 1 FOUNDRY_PROFILE: ci ETH_RPC_URL: https://ci-mainnet-l1-archive.optimism.io working_directory: packages/contracts-bedrock @@ -815,7 +888,7 @@ jobs: machine: true resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise - attach_workspace: { at: "." } - install-contracts-dependencies - check-changed: @@ -858,7 +931,7 @@ jobs: machine: image: <> steps: - - checkout + - checkout-with-mise - run: name: Install ripgrep command: sudo apt-get install -y ripgrep @@ -882,7 +955,7 @@ jobs: machine: true resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise - check-changed: patterns: "<>" - attach_workspace: @@ -907,7 +980,7 @@ jobs: machine: true resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise - run: name: run Go linter command: | @@ -946,13 +1019,20 @@ jobs: machine: true resource_class: <> steps: - - checkout + - checkout-with-mise - attach_workspace: at: "." 
- run: name: build op-program-client command: make op-program-client working_directory: op-program + - run: + name: build op-program-host + command: make op-program-host + working_directory: op-program + - run: + name: build cannon + command: make cannon - run: name: run tests no_output_timeout: <> @@ -1001,10 +1081,14 @@ jobs: mentions: "<>" sanitize-op-program: - docker: - - image: <> + machine: true + resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise + - run: + name: Install tools + command: | + sudo apt-get install -y binutils-mips-linux-gnu - run: name: Build cannon command: make cannon @@ -1013,13 +1097,15 @@ jobs: command: make op-program - run: name: Sanitize op-program client - command: make -f cannon/Makefile sanitize-program GUEST_PROGRAM=op-program/bin/op-program-client.elf + command: make sanitize-program GUEST_PROGRAM=../op-program/bin/op-program-client.elf + working_directory: cannon + cannon-prestate-quick: machine: true resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise - restore_cache: name: Restore cannon prestate cache key: cannon-prestate-{{ checksum "./cannon/bin/cannon" }}-{{ checksum "op-program/bin/op-program-client.elf" }} @@ -1042,9 +1128,9 @@ jobs: cannon-prestate: docker: - - image: <> + - image: <> steps: - - checkout + - checkout-with-mise - setup_remote_docker - run: name: Build prestates @@ -1056,39 +1142,37 @@ jobs: - "op-program/bin/meta*" publish-cannon-prestates: - docker: - - image: <> + machine: true + resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise - attach_workspace: at: "." 
- gcp-cli/install - gcp-oidc-authenticate: - gcp_cred_config_file_path: /root/gcp_cred_config.json - oidc_token_file_path: /root/oidc_token.json + gcp_cred_config_file_path: /tmp/gcp_cred_config.json + oidc_token_file_path: /tmp/oidc_token.json - run: name: Upload cannon prestates command: | # Use the actual hash for tags (hash can be found by reading releases.json) PRESTATE_HASH=$(jq -r .pre ./op-program/bin/prestate-proof.json) - PRESTATE_MT_HASH=$(jq -r .pre ./op-program/bin/prestate-proof-mt.json) + PRESTATE_MT64_HASH=$(jq -r .pre ./op-program/bin/prestate-proof-mt64.json) PRESTATE_INTEROP_HASH=$(jq -r .pre ./op-program/bin/prestate-proof-interop.json) BRANCH_NAME=$(echo "<< pipeline.git.branch >>" | tr '/' '-') - echo "Publishing ${PRESTATE_HASH}, ${PRESTATE_MT_HASH}, ${PRESTATE_INTEROP_HASH} as ${BRANCH_NAME}" + echo "Publishing ${PRESTATE_HASH}, ${PRESTATE_MT64_HASH}, ${PRESTATE_INTEROP_HASH} as ${BRANCH_NAME}" if [[ "" != "<< pipeline.git.branch >>" ]] then # Use the branch name for branches to provide a consistent URL PRESTATE_HASH="${BRANCH_NAME}" - PRESTATE_MT_HASH="${BRANCH_NAME}" + PRESTATE_MT64_HASH="${BRANCH_NAME}" PRESTATE_INTEROP_HASH="${BRANCH_NAME}" fi gsutil cp ./op-program/bin/prestate.bin.gz \ "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_HASH}.bin.gz" - gsutil cp ./op-program/bin/prestate-mt.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_MT_HASH}-mt.bin.gz" gsutil cp ./op-program/bin/prestate-mt64.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_MT_HASH}-mt64.bin.gz" + "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_MT64_HASH}-mt64.bin.gz" gsutil cp ./op-program/bin/prestate-interop.bin.gz \ "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_INTEROP_HASH}-interop.bin.gz" - notify-failures-on-develop: @@ -1096,9 +1180,9 @@ jobs: preimage-reproducibility: docker: - - image: <> + - image: <> steps: - - checkout + - checkout-with-mise - setup_remote_docker 
- run: make -C op-program verify-reproducibility - notify-failures-on-develop: @@ -1106,9 +1190,9 @@ jobs: cannon-stf-verify: docker: - - image: <> + - image: <> steps: - - checkout + - checkout-with-mise - setup_remote_docker - run: name: Build cannon @@ -1136,7 +1220,7 @@ jobs: - image: returntocorp/semgrep resource_class: xlarge steps: - - checkout + - checkout # no need to use mise here since the docker image contains the only dependency - unless: condition: equal: ["develop", << pipeline.git.branch >>] @@ -1165,10 +1249,7 @@ jobs: bedrock-go-tests: # just a helper, that depends on all the actual test jobs docker: - # Use a smaller base image to avoid pulling the huge ci-builder - # image which is not needed for this job and sometimes misses - # the cache. - - image: cimg/base:2024.01 + - image: <> resource_class: medium steps: - run: echo Done @@ -1178,7 +1259,7 @@ jobs: docker: - image: cimg/go:1.21 steps: - - checkout + - checkout-with-mise - run: name: verify-sepolia command: | @@ -1188,31 +1269,21 @@ jobs: mentions: "@proofs-team" op-program-compat: - docker: - - image: <> + machine: true + resource_class: ethereum-optimism/latitude-1 steps: - - checkout - - restore_cache: - name: Restore Go modules cache - key: gomod-{{ checksum "go.sum" }} - - restore_cache: - key: golang-build-cache-op-program-compat-{{ checksum "go.sum" }} + - checkout-with-mise - run: name: compat-sepolia command: | make verify-compat working_directory: op-program - - save_cache: - name: Save Go build cache - key: golang-build-cache-op-program-compat-{{ checksum "go.sum" }} - paths: - - "/root/.cache/go-build" check-generated-mocks-op-node: - docker: - - image: <> + machine: true + resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise - check-changed: patterns: op-node - run: @@ -1220,10 +1291,10 @@ jobs: command: make generate-mocks-op-node && git diff --exit-code check-generated-mocks-op-service: - docker: - - image: <> + machine: true + 
resource_class: ethereum-optimism/latitude-1 steps: - - checkout + - checkout-with-mise - check-changed: patterns: op-service - run: @@ -1232,10 +1303,10 @@ jobs: kontrol-tests: docker: - - image: << pipeline.parameters.ci_builder_image >> + - image: <> resource_class: xlarge steps: - - checkout + - checkout-with-mise - install-contracts-dependencies - check-changed: no_go_deps: "true" @@ -1272,7 +1343,7 @@ jobs: oidc_token_file_path: /tmp/oidc_token.json project_id: GCP_TOOLS_ARTIFACTS_PROJECT_ID service_account_email: GCP_CONTRACTS_PUBLISHER_SERVICE_ACCOUNT_EMAIL - - checkout + - checkout-with-mise - install-contracts-dependencies - run: name: Pull artifacts @@ -1299,23 +1370,15 @@ jobs: default: .goreleaser.yaml type: string docker: - - image: <> + - image: <> resource_class: large steps: - setup_remote_docker - gcp-cli/install - gcp-oidc-authenticate: - gcp_cred_config_file_path: /root/gcp_cred_config.json - oidc_token_file_path: /root/oidc_token.json - - checkout - - run: - name: Install goreleaser pro - command: | - mkdir -p /tmp/goreleaser - cd /tmp/goreleaser - curl -L -o goreleaser.tgz https://github.com/goreleaser/goreleaser-pro/releases/download/v2.4.3-pro/goreleaser-pro_Linux_x86_64.tar.gz - tar -xzvf goreleaser.tgz - mv goreleaser /usr/local/bin/goreleaser + gcp_cred_config_file_path: /tmp/gcp_cred_config.json + oidc_token_file_path: /tmp/oidc_token.json + - checkout-with-mise - run: name: Configure Docker command: | @@ -1383,6 +1446,9 @@ workflows: - contracts-bedrock-checks: requires: - contracts-bedrock-build + - contracts-bedrock-frozen-code: + requires: + - contracts-bedrock-checks - diff-asterisc-bytecode - semgrep-scan: name: semgrep-scan-local @@ -1415,6 +1481,7 @@ workflows: requires: ["contracts-bedrock-build"] - go-tests: packages: | + op-alt-da op-batcher op-chain-ops op-node @@ -1519,7 +1586,7 @@ workflows: type: approval filters: tags: - only: /^(da-server|ci-builder(-rust)?|proofs-tools|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/ + 
only: /^(da-server|proofs-tools|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/ branches: ignore: /.*/ # Standard (medium) cross-platform docker images go here @@ -1585,8 +1652,6 @@ workflows: matrix: parameters: docker_name: - - ci-builder - - ci-builder-rust - proofs-tools name: <>-docker-release resource_class: xlarge @@ -1674,7 +1739,7 @@ workflows: resource_class: ethereum-optimism/latitude-fps-1 environment_overrides: | export OP_E2E_CANNON_ENABLED="true" - export PARALLEL=8 + export PARALLEL=24 packages: | op-e2e/faultproofs context: diff --git a/.gitignore b/.gitignore index 4323c5f4185..f58f7f9c1f6 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,8 @@ cache !op-chain-ops/foundry/testdata/srcmaps/cache !op-chain-ops/foundry/testdata/srcmaps/artifacts +!op-deployer/pkg/deployer/artifacts + packages/contracts-bedrock/deployments/devnetL1 packages/contracts-bedrock/deployments/anvil diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2fcb5480c1a..2eeb49e06b5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,9 +11,19 @@ You can: - **IMPORTANT**: If you believe your report impacts the security of this repository, refer to the canonical [Security Policy](https://github.com/ethereum-optimism/.github/blob/master/SECURITY.md) document. - Fix issues that are tagged as [`D-good-first-issue`](https://github.com/ethereum-optimism/optimism/labels/D-good-first-issue) or [`S-confirmed`](https://github.com/ethereum-optimism/optimism/labels/S-confirmed). - Larger projects are listed on [this project board](https://github.com/orgs/ethereum-optimism/projects/31/views/9). Please talk to us if you're considering working on one of these, they may not be fully specified so it will reduce risk to discuss the approach and ensure that it's still relevant. -- Help improve the [Optimism Developer Docs](https://github.com/ethereum-optimism/docs) by reporting issues, fixing typos, or adding missing sections. 
+- Help improve the [Optimism Docs] by reporting issues or adding missing sections. - Get involved in the protocol design process by joining discussions within the [OP Stack Specs](https://github.com/ethereum-optimism/specs/discussions) repository. +[Optimism Docs]: https://github.com/ethereum-optimism/docs + +### Contributions Related to Spelling and Grammar + +At this time, we will not be accepting contributions that primarily fix +spelling, stylistic or grammatical errors in documentation, code or elsewhere. + +Pull Requests that ignore this guideline will be closed, +and may be aggregated into new Pull Requests without attribution. + ## Code of Conduct Interactions within this repository are subject to a [Code of Conduct](https://github.com/ethereum-optimism/.github/blob/master/CODE_OF_CONDUCT.md) adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/1/4/code-of-conduct/). diff --git a/Makefile b/Makefile index e2f64c85670..c6dc96357d3 100644 --- a/Makefile +++ b/Makefile @@ -28,10 +28,6 @@ lint-go-fix: ## Lints Go code with specific linters and fixes reported issues golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./... --fix .PHONY: lint-go-fix -ci-builder: ## Builds the CI builder Docker image - docker build -t ci-builder -f ops/docker/ci-builder/Dockerfile . -.PHONY: ci-builder - golang-docker: ## Builds Docker images for Go components using buildx # We don't use a buildx builder here, and just load directly into regular docker, for convenience. GIT_COMMIT=$$(git rev-parse HEAD) \ @@ -135,15 +131,15 @@ reproducible-prestate: ## Builds reproducible-prestate binary .PHONY: reproducible-prestate # Include any files required for the devnet to build and run. 
-DEVNET_CANNON_PRESTATE_FILES := op-program/bin/prestate-proof.json op-program/bin/prestate.bin.gz op-program/bin/prestate-proof-mt.json op-program/bin/prestate-mt.bin.gz op-program/bin/prestate-interop.bin.gz +DEVNET_CANNON_PRESTATE_FILES := op-program/bin/prestate-proof.json op-program/bin/prestate.bin.gz op-program/bin/prestate-proof-mt64.json op-program/bin/prestate-mt64.bin.gz op-program/bin/prestate-interop.bin.gz $(DEVNET_CANNON_PRESTATE_FILES): make cannon-prestate - make cannon-prestate-mt + make cannon-prestate-mt64 make cannon-prestate-interop -cannon-prestates: cannon-prestate cannon-prestate-mt cannon-prestate-interop +cannon-prestates: cannon-prestate cannon-prestate-mt64 cannon-prestate-interop .PHONY: cannon-prestates cannon-prestate: op-program cannon ## Generates prestate using cannon and op-program @@ -152,11 +148,11 @@ cannon-prestate: op-program cannon ## Generates prestate using cannon and op-pro mv op-program/bin/0.json op-program/bin/prestate-proof.json .PHONY: cannon-prestate -cannon-prestate-mt: op-program cannon ## Generates prestate using cannon and op-program in the latest 64-bit multithreaded cannon format - ./cannon/bin/cannon load-elf --type multithreaded64-3 --path op-program/bin/op-program-client64.elf --out op-program/bin/prestate-mt.bin.gz --meta op-program/bin/meta-mt.json - ./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input op-program/bin/prestate-mt.bin.gz --meta op-program/bin/meta-mt.json --proof-fmt 'op-program/bin/%d-mt.json' --output "" - mv op-program/bin/0-mt.json op-program/bin/prestate-proof-mt.json -.PHONY: cannon-prestate-mt +cannon-prestate-mt64: op-program cannon ## Generates prestate using cannon and op-program in the latest 64-bit multithreaded cannon format + ./cannon/bin/cannon load-elf --type multithreaded64-3 --path op-program/bin/op-program-client64.elf --out op-program/bin/prestate-mt64.bin.gz --meta op-program/bin/meta-mt64.json + ./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input 
op-program/bin/prestate-mt64.bin.gz --meta op-program/bin/meta-mt64.json --proof-fmt 'op-program/bin/%d-mt64.json' --output "" + mv op-program/bin/0-mt64.json op-program/bin/prestate-proof-mt64.json +.PHONY: cannon-prestate-mt64 cannon-prestate-interop: op-program cannon ## Generates interop prestate using cannon and op-program in the latest 64-bit multithreaded cannon format ./cannon/bin/cannon load-elf --type multithreaded64-3 --path op-program/bin/op-program-client-interop.elf --out op-program/bin/prestate-interop.bin.gz --meta op-program/bin/meta-interop.json diff --git a/README.md b/README.md index e2b499f648f..d4ad5efe04f 100644 --- a/README.md +++ b/README.md @@ -111,7 +111,6 @@ See the [Node Software Releases](https://docs.optimism.io/builders/node-operator The full set of components that have releases are: -- `ci-builder` - `op-batcher` - `op-contracts` - `op-challenger` diff --git a/cannon/Makefile b/cannon/Makefile index a9d79d29727..c6e63bad1ca 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -50,7 +50,8 @@ elf: make -C ./testdata/example elf sanitize-program: - @if ! { mips-linux-gnu-objdump -d -j .text $$GUEST_PROGRAM | awk '{print $$3}' | grep -Ew -m1 "$(UNSUPPORTED_OPCODES)"; }; then \ + mips-linux-gnu-objdump -d -j .text $$GUEST_PROGRAM > ./bin/dump.txt + @if ! 
{ cat ./bin/dump.txt | awk '{print $$3}' | grep -Ew -m1 "$(UNSUPPORTED_OPCODES)"; }; then \ echo "guest program is sanitized for unsupported instructions"; \ else \ echo "found unsupported instructions in the guest program"; \ diff --git a/cannon/mipsevm/exec/mips_instructions.go b/cannon/mipsevm/exec/mips_instructions.go index e0d91893d1a..a5035aaa708 100644 --- a/cannon/mipsevm/exec/mips_instructions.go +++ b/cannon/mipsevm/exec/mips_instructions.go @@ -19,6 +19,9 @@ const ( // Return address register RegRA = 31 + + // Masks + U32Mask = 0xFFffFFff ) func GetInstructionDetails(pc Word, memory *memory.Memory) (insn, opcode, fun uint32) { @@ -205,14 +208,16 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem switch fun { case 0x00: // sll - return SignExtend((rt&0xFFFFFFFF)<<((insn>>6)&0x1F), 32) + shiftAmt := (insn >> 6) & 0x1F + return SignExtend((rt<>((insn>>6)&0x1F), 32) case 0x03: // sra shamt := Word((insn >> 6) & 0x1F) return SignExtend((rt&0xFFFFFFFF)>>shamt, 32-shamt) case 0x04: // sllv - return SignExtend((rt&0xFFFFFFFF)<<(rs&0x1F), 32) + shiftAmt := rs & 0x1F + return SignExtend((rt<>(rs&0x1F), 32) case 0x07: // srav diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index 4e04a7c10f5..9f0812c8fd8 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -552,10 +552,18 @@ func TestEVM_SingleStep_SlSr(t *testing.T) { funct uint16 expectVal Word }{ - {name: "sll", funct: uint16(4) << 6, rt: Word(0x20), rsReg: uint32(0x0), expectVal: Word(0x20) << uint8(4)}, // sll t0, t1, 3 - {name: "srl", funct: uint16(4)<<6 | 2, rt: Word(0x20), rsReg: uint32(0x0), expectVal: Word(0x20) >> uint8(4)}, // srl t0, t1, 3 - {name: "sra", funct: uint16(4)<<6 | 3, rt: Word(0x80_00_00_20), rsReg: uint32(0x0), expectVal: signExtend64(0xF8_00_00_02)}, // sra t0, t1, 3 - {name: "sllv", funct: uint16(4), rt: Word(0x20), rs: Word(4), rsReg: uint32(0xa), expectVal: 
Word(0x20) << Word(4)}, // sllv t0, t1, t2 + {name: "sll", funct: uint16(4) << 6, rt: Word(0x20), rsReg: uint32(0x0), expectVal: Word(0x20) << uint8(4)}, // sll t0, t1, 3 + {name: "sll with overflow", funct: uint16(1) << 6, rt: Word(0x8000_0000), rsReg: uint32(0x0), expectVal: 0x0}, + {name: "sll with sign extension", funct: uint16(4) << 6, rt: Word(0x0800_0000), rsReg: uint32(0x0), expectVal: signExtend64(0x8000_0000)}, + {name: "sll with max shift, sign extension", funct: uint16(31) << 6, rt: Word(0x01), rsReg: uint32(0x0), expectVal: signExtend64(0x8000_0000)}, + {name: "sll with max shift, overflow", funct: uint16(31) << 6, rt: Word(0x02), rsReg: uint32(0x0), expectVal: 0x0}, + {name: "srl", funct: uint16(4)<<6 | 2, rt: Word(0x20), rsReg: uint32(0x0), expectVal: Word(0x20) >> uint8(4)}, // srl t0, t1, 3 + {name: "sra", funct: uint16(4)<<6 | 3, rt: Word(0x80_00_00_20), rsReg: uint32(0x0), expectVal: signExtend64(0xF8_00_00_02)}, // sra t0, t1, 3 + {name: "sllv", funct: uint16(4), rt: Word(0x20), rs: Word(4), rsReg: uint32(0xa), expectVal: Word(0x20) << Word(4)}, // sllv t0, t1, t2 + {name: "sllv with overflow", funct: uint16(4), rt: Word(0x8000_0000), rs: Word(1), rsReg: uint32(0xa), expectVal: 0x0}, + {name: "sllv with sign extension", funct: uint16(4), rt: Word(0x0800_0000), rs: Word(4), rsReg: uint32(0xa), expectVal: signExtend64(0x8000_0000)}, + {name: "sllv with max shift, sign extension", funct: uint16(4), rt: Word(0x01), rs: Word(31), rsReg: uint32(0xa), expectVal: signExtend64(0x8000_0000)}, + {name: "sllv with max shift, overflow", funct: uint16(4), rt: Word(0x02), rs: Word(31), rsReg: uint32(0xa), expectVal: 0x0}, {name: "srlv", funct: uint16(6), rt: Word(0x20_00), rs: Word(4), rsReg: uint32(0xa), expectVal: Word(0x20_00) >> Word(4)}, // srlv t0, t1, t2 {name: "srav", funct: uint16(7), rt: Word(0xdeafbeef), rs: Word(12), rsReg: uint32(0xa), expectVal: signExtend64(Word(0xfffdeafb))}, // srav t0, t1, t2 } diff --git a/cannon/mipsevm/testutil/evm.go 
b/cannon/mipsevm/testutil/evm.go index 6a4832c2953..745f7c662b2 100644 --- a/cannon/mipsevm/testutil/evm.go +++ b/cannon/mipsevm/testutil/evm.go @@ -109,7 +109,7 @@ func NewEVMEnv(contracts *ContractMetadata) (*vm.EVM, *state.StateDB) { blockContext := core.NewEVMBlockContext(header, bc, nil, chainCfg, state) vmCfg := vm.Config{} - env := vm.NewEVM(blockContext, vm.TxContext{}, state, chainCfg, vmCfg) + env := vm.NewEVM(blockContext, state, chainCfg, vmCfg) // pre-deploy the contracts env.StateDB.SetCode(contracts.Addresses.Oracle, contracts.Artifacts.Oracle.DeployedBytecode.Object) diff --git a/devnet-sdk/constraints/constraints.go b/devnet-sdk/constraints/constraints.go new file mode 100644 index 00000000000..c0541bd796a --- /dev/null +++ b/devnet-sdk/constraints/constraints.go @@ -0,0 +1,25 @@ +package constraints + +import ( + "log/slog" + + "github.com/ethereum-optimism/optimism/devnet-sdk/types" +) + +type WalletConstraint interface { + CheckWallet(wallet types.Wallet) bool +} + +type WalletConstraintFunc func(wallet types.Wallet) bool + +func (f WalletConstraintFunc) CheckWallet(wallet types.Wallet) bool { + return f(wallet) +} + +func WithBalance(amount types.Balance) WalletConstraint { + return WalletConstraintFunc(func(wallet types.Wallet) bool { + balance := wallet.Balance() + slog.Debug("checking balance", "wallet", wallet.Address(), "balance", balance, "needed", amount) + return balance.GreaterThan(amount) + }) +} diff --git a/devnet-sdk/contracts/bindings/superchainweth.go b/devnet-sdk/contracts/bindings/superchainweth.go new file mode 100644 index 00000000000..e0049ff9996 --- /dev/null +++ b/devnet-sdk/contracts/bindings/superchainweth.go @@ -0,0 +1,1879 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. 
+ +package bindings + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// SuperchainWETHMetaData contains all meta data concerning the SuperchainWETH contract. +var SuperchainWETHMetaData = &bind.MetaData{ + ABI: "[{\"type\":\"fallback\",\"stateMutability\":\"payable\"},{\"type\":\"receive\",\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"allowance\",\"inputs\":[{\"name\":\"owner\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"spender\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"approve\",\"inputs\":[{\"name\":\"guy\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"balanceOf\",\"inputs\":[{\"name\":\"src\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"crosschainBurn\",\"inputs\":[{\"name\":\"_from\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_amount\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\
",\"name\":\"crosschainMint\",\"inputs\":[{\"name\":\"_to\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_amount\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"decimals\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint8\",\"internalType\":\"uint8\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"deposit\",\"inputs\":[],\"outputs\":[],\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"name\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"relayETH\",\"inputs\":[{\"name\":\"_from\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_to\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_amount\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"sendETH\",\"inputs\":[{\"name\":\"_to\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_chainId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"msgHash_\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"supportsInterface\",\"inputs\":[{\"name\":\"_interfaceId\",\"type\":\"bytes4\",\"internalType\":\"bytes4\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"symbol\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"totalSupply\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"transfer\",\"inputs\":[{\"name\":\"dst\",\"type\":\"address\",\"inter
nalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"transferFrom\",\"inputs\":[{\"name\":\"src\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"dst\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"version\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"withdraw\",\"inputs\":[{\"name\":\"_amount\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"event\",\"name\":\"Approval\",\"inputs\":[{\"name\":\"src\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"guy\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"CrosschainBurn\",\"inputs\":[{\"name\":\"from\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"sender\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"CrosschainMint\",\"inputs\":[{\"name\":\"to\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"sender\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Deposit\",
\"inputs\":[{\"name\":\"dst\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"RelayETH\",\"inputs\":[{\"name\":\"from\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"to\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"source\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"SendETH\",\"inputs\":[{\"name\":\"from\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"to\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"destination\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Transfer\",\"inputs\":[{\"name\":\"src\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"dst\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Withdrawal\",\"inputs\":[{\"name\":\"src\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"error\",\"name\":\"InvalidCrossDomainSender\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"NotCustomGasToken\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"Unauthorized\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"ZeroAddress\",\"inputs\":[]}]", +} + +// SuperchainWETHABI is the input ABI used to generate the binding from. 
+// Deprecated: Use SuperchainWETHMetaData.ABI instead. +var SuperchainWETHABI = SuperchainWETHMetaData.ABI + +// SuperchainWETH is an auto generated Go binding around an Ethereum contract. +type SuperchainWETH struct { + SuperchainWETHCaller // Read-only binding to the contract + SuperchainWETHTransactor // Write-only binding to the contract + SuperchainWETHFilterer // Log filterer for contract events +} + +// SuperchainWETHCaller is an auto generated read-only Go binding around an Ethereum contract. +type SuperchainWETHCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// SuperchainWETHTransactor is an auto generated write-only Go binding around an Ethereum contract. +type SuperchainWETHTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// SuperchainWETHFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type SuperchainWETHFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// SuperchainWETHSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type SuperchainWETHSession struct { + Contract *SuperchainWETH // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// SuperchainWETHCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type SuperchainWETHCallerSession struct { + Contract *SuperchainWETHCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// SuperchainWETHTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. 
+type SuperchainWETHTransactorSession struct { + Contract *SuperchainWETHTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// SuperchainWETHRaw is an auto generated low-level Go binding around an Ethereum contract. +type SuperchainWETHRaw struct { + Contract *SuperchainWETH // Generic contract binding to access the raw methods on +} + +// SuperchainWETHCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type SuperchainWETHCallerRaw struct { + Contract *SuperchainWETHCaller // Generic read-only contract binding to access the raw methods on +} + +// SuperchainWETHTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type SuperchainWETHTransactorRaw struct { + Contract *SuperchainWETHTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewSuperchainWETH creates a new instance of SuperchainWETH, bound to a specific deployed contract. +func NewSuperchainWETH(address common.Address, backend bind.ContractBackend) (*SuperchainWETH, error) { + contract, err := bindSuperchainWETH(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &SuperchainWETH{SuperchainWETHCaller: SuperchainWETHCaller{contract: contract}, SuperchainWETHTransactor: SuperchainWETHTransactor{contract: contract}, SuperchainWETHFilterer: SuperchainWETHFilterer{contract: contract}}, nil +} + +// NewSuperchainWETHCaller creates a new read-only instance of SuperchainWETH, bound to a specific deployed contract. 
+func NewSuperchainWETHCaller(address common.Address, caller bind.ContractCaller) (*SuperchainWETHCaller, error) { + contract, err := bindSuperchainWETH(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &SuperchainWETHCaller{contract: contract}, nil +} + +// NewSuperchainWETHTransactor creates a new write-only instance of SuperchainWETH, bound to a specific deployed contract. +func NewSuperchainWETHTransactor(address common.Address, transactor bind.ContractTransactor) (*SuperchainWETHTransactor, error) { + contract, err := bindSuperchainWETH(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &SuperchainWETHTransactor{contract: contract}, nil +} + +// NewSuperchainWETHFilterer creates a new log filterer instance of SuperchainWETH, bound to a specific deployed contract. +func NewSuperchainWETHFilterer(address common.Address, filterer bind.ContractFilterer) (*SuperchainWETHFilterer, error) { + contract, err := bindSuperchainWETH(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &SuperchainWETHFilterer{contract: contract}, nil +} + +// bindSuperchainWETH binds a generic wrapper to an already deployed contract. +func bindSuperchainWETH(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(SuperchainWETHABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. 
+func (_SuperchainWETH *SuperchainWETHRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _SuperchainWETH.Contract.SuperchainWETHCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_SuperchainWETH *SuperchainWETHRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SuperchainWETH.Contract.SuperchainWETHTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_SuperchainWETH *SuperchainWETHRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _SuperchainWETH.Contract.SuperchainWETHTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_SuperchainWETH *SuperchainWETHCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _SuperchainWETH.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_SuperchainWETH *SuperchainWETHTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SuperchainWETH.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_SuperchainWETH *SuperchainWETHTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _SuperchainWETH.Contract.contract.Transact(opts, method, params...) 
+} + +// Allowance is a free data retrieval call binding the contract method 0xdd62ed3e. +// +// Solidity: function allowance(address owner, address spender) view returns(uint256) +func (_SuperchainWETH *SuperchainWETHCaller) Allowance(opts *bind.CallOpts, owner common.Address, spender common.Address) (*big.Int, error) { + var out []interface{} + err := _SuperchainWETH.contract.Call(opts, &out, "allowance", owner, spender) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// Allowance is a free data retrieval call binding the contract method 0xdd62ed3e. +// +// Solidity: function allowance(address owner, address spender) view returns(uint256) +func (_SuperchainWETH *SuperchainWETHSession) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { + return _SuperchainWETH.Contract.Allowance(&_SuperchainWETH.CallOpts, owner, spender) +} + +// Allowance is a free data retrieval call binding the contract method 0xdd62ed3e. +// +// Solidity: function allowance(address owner, address spender) view returns(uint256) +func (_SuperchainWETH *SuperchainWETHCallerSession) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { + return _SuperchainWETH.Contract.Allowance(&_SuperchainWETH.CallOpts, owner, spender) +} + +// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. +// +// Solidity: function balanceOf(address src) view returns(uint256) +func (_SuperchainWETH *SuperchainWETHCaller) BalanceOf(opts *bind.CallOpts, src common.Address) (*big.Int, error) { + var out []interface{} + err := _SuperchainWETH.contract.Call(opts, &out, "balanceOf", src) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. 
+// +// Solidity: function balanceOf(address src) view returns(uint256) +func (_SuperchainWETH *SuperchainWETHSession) BalanceOf(src common.Address) (*big.Int, error) { + return _SuperchainWETH.Contract.BalanceOf(&_SuperchainWETH.CallOpts, src) +} + +// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. +// +// Solidity: function balanceOf(address src) view returns(uint256) +func (_SuperchainWETH *SuperchainWETHCallerSession) BalanceOf(src common.Address) (*big.Int, error) { + return _SuperchainWETH.Contract.BalanceOf(&_SuperchainWETH.CallOpts, src) +} + +// Decimals is a free data retrieval call binding the contract method 0x313ce567. +// +// Solidity: function decimals() view returns(uint8) +func (_SuperchainWETH *SuperchainWETHCaller) Decimals(opts *bind.CallOpts) (uint8, error) { + var out []interface{} + err := _SuperchainWETH.contract.Call(opts, &out, "decimals") + + if err != nil { + return *new(uint8), err + } + + out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) + + return out0, err + +} + +// Decimals is a free data retrieval call binding the contract method 0x313ce567. +// +// Solidity: function decimals() view returns(uint8) +func (_SuperchainWETH *SuperchainWETHSession) Decimals() (uint8, error) { + return _SuperchainWETH.Contract.Decimals(&_SuperchainWETH.CallOpts) +} + +// Decimals is a free data retrieval call binding the contract method 0x313ce567. +// +// Solidity: function decimals() view returns(uint8) +func (_SuperchainWETH *SuperchainWETHCallerSession) Decimals() (uint8, error) { + return _SuperchainWETH.Contract.Decimals(&_SuperchainWETH.CallOpts) +} + +// Name is a free data retrieval call binding the contract method 0x06fdde03. 
+// +// Solidity: function name() view returns(string) +func (_SuperchainWETH *SuperchainWETHCaller) Name(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _SuperchainWETH.contract.Call(opts, &out, "name") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +// Name is a free data retrieval call binding the contract method 0x06fdde03. +// +// Solidity: function name() view returns(string) +func (_SuperchainWETH *SuperchainWETHSession) Name() (string, error) { + return _SuperchainWETH.Contract.Name(&_SuperchainWETH.CallOpts) +} + +// Name is a free data retrieval call binding the contract method 0x06fdde03. +// +// Solidity: function name() view returns(string) +func (_SuperchainWETH *SuperchainWETHCallerSession) Name() (string, error) { + return _SuperchainWETH.Contract.Name(&_SuperchainWETH.CallOpts) +} + +// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. +// +// Solidity: function supportsInterface(bytes4 _interfaceId) view returns(bool) +func (_SuperchainWETH *SuperchainWETHCaller) SupportsInterface(opts *bind.CallOpts, _interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _SuperchainWETH.contract.Call(opts, &out, "supportsInterface", _interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. +// +// Solidity: function supportsInterface(bytes4 _interfaceId) view returns(bool) +func (_SuperchainWETH *SuperchainWETHSession) SupportsInterface(_interfaceId [4]byte) (bool, error) { + return _SuperchainWETH.Contract.SupportsInterface(&_SuperchainWETH.CallOpts, _interfaceId) +} + +// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. 
+// +// Solidity: function supportsInterface(bytes4 _interfaceId) view returns(bool) +func (_SuperchainWETH *SuperchainWETHCallerSession) SupportsInterface(_interfaceId [4]byte) (bool, error) { + return _SuperchainWETH.Contract.SupportsInterface(&_SuperchainWETH.CallOpts, _interfaceId) +} + +// Symbol is a free data retrieval call binding the contract method 0x95d89b41. +// +// Solidity: function symbol() view returns(string) +func (_SuperchainWETH *SuperchainWETHCaller) Symbol(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _SuperchainWETH.contract.Call(opts, &out, "symbol") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +// Symbol is a free data retrieval call binding the contract method 0x95d89b41. +// +// Solidity: function symbol() view returns(string) +func (_SuperchainWETH *SuperchainWETHSession) Symbol() (string, error) { + return _SuperchainWETH.Contract.Symbol(&_SuperchainWETH.CallOpts) +} + +// Symbol is a free data retrieval call binding the contract method 0x95d89b41. +// +// Solidity: function symbol() view returns(string) +func (_SuperchainWETH *SuperchainWETHCallerSession) Symbol() (string, error) { + return _SuperchainWETH.Contract.Symbol(&_SuperchainWETH.CallOpts) +} + +// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. +// +// Solidity: function totalSupply() view returns(uint256) +func (_SuperchainWETH *SuperchainWETHCaller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _SuperchainWETH.contract.Call(opts, &out, "totalSupply") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. 
+// +// Solidity: function totalSupply() view returns(uint256) +func (_SuperchainWETH *SuperchainWETHSession) TotalSupply() (*big.Int, error) { + return _SuperchainWETH.Contract.TotalSupply(&_SuperchainWETH.CallOpts) +} + +// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. +// +// Solidity: function totalSupply() view returns(uint256) +func (_SuperchainWETH *SuperchainWETHCallerSession) TotalSupply() (*big.Int, error) { + return _SuperchainWETH.Contract.TotalSupply(&_SuperchainWETH.CallOpts) +} + +// Version is a free data retrieval call binding the contract method 0x54fd4d50. +// +// Solidity: function version() view returns(string) +func (_SuperchainWETH *SuperchainWETHCaller) Version(opts *bind.CallOpts) (string, error) { + var out []interface{} + err := _SuperchainWETH.contract.Call(opts, &out, "version") + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +// Version is a free data retrieval call binding the contract method 0x54fd4d50. +// +// Solidity: function version() view returns(string) +func (_SuperchainWETH *SuperchainWETHSession) Version() (string, error) { + return _SuperchainWETH.Contract.Version(&_SuperchainWETH.CallOpts) +} + +// Version is a free data retrieval call binding the contract method 0x54fd4d50. +// +// Solidity: function version() view returns(string) +func (_SuperchainWETH *SuperchainWETHCallerSession) Version() (string, error) { + return _SuperchainWETH.Contract.Version(&_SuperchainWETH.CallOpts) +} + +// Approve is a paid mutator transaction binding the contract method 0x095ea7b3. 
+// +// Solidity: function approve(address guy, uint256 wad) returns(bool) +func (_SuperchainWETH *SuperchainWETHTransactor) Approve(opts *bind.TransactOpts, guy common.Address, wad *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.contract.Transact(opts, "approve", guy, wad) +} + +// Approve is a paid mutator transaction binding the contract method 0x095ea7b3. +// +// Solidity: function approve(address guy, uint256 wad) returns(bool) +func (_SuperchainWETH *SuperchainWETHSession) Approve(guy common.Address, wad *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.Approve(&_SuperchainWETH.TransactOpts, guy, wad) +} + +// Approve is a paid mutator transaction binding the contract method 0x095ea7b3. +// +// Solidity: function approve(address guy, uint256 wad) returns(bool) +func (_SuperchainWETH *SuperchainWETHTransactorSession) Approve(guy common.Address, wad *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.Approve(&_SuperchainWETH.TransactOpts, guy, wad) +} + +// CrosschainBurn is a paid mutator transaction binding the contract method 0x2b8c49e3. +// +// Solidity: function crosschainBurn(address _from, uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHTransactor) CrosschainBurn(opts *bind.TransactOpts, _from common.Address, _amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.contract.Transact(opts, "crosschainBurn", _from, _amount) +} + +// CrosschainBurn is a paid mutator transaction binding the contract method 0x2b8c49e3. +// +// Solidity: function crosschainBurn(address _from, uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHSession) CrosschainBurn(_from common.Address, _amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.CrosschainBurn(&_SuperchainWETH.TransactOpts, _from, _amount) +} + +// CrosschainBurn is a paid mutator transaction binding the contract method 0x2b8c49e3. 
+// +// Solidity: function crosschainBurn(address _from, uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHTransactorSession) CrosschainBurn(_from common.Address, _amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.CrosschainBurn(&_SuperchainWETH.TransactOpts, _from, _amount) +} + +// CrosschainMint is a paid mutator transaction binding the contract method 0x18bf5077. +// +// Solidity: function crosschainMint(address _to, uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHTransactor) CrosschainMint(opts *bind.TransactOpts, _to common.Address, _amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.contract.Transact(opts, "crosschainMint", _to, _amount) +} + +// CrosschainMint is a paid mutator transaction binding the contract method 0x18bf5077. +// +// Solidity: function crosschainMint(address _to, uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHSession) CrosschainMint(_to common.Address, _amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.CrosschainMint(&_SuperchainWETH.TransactOpts, _to, _amount) +} + +// CrosschainMint is a paid mutator transaction binding the contract method 0x18bf5077. +// +// Solidity: function crosschainMint(address _to, uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHTransactorSession) CrosschainMint(_to common.Address, _amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.CrosschainMint(&_SuperchainWETH.TransactOpts, _to, _amount) +} + +// Deposit is a paid mutator transaction binding the contract method 0xd0e30db0. +// +// Solidity: function deposit() payable returns() +func (_SuperchainWETH *SuperchainWETHTransactor) Deposit(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SuperchainWETH.contract.Transact(opts, "deposit") +} + +// Deposit is a paid mutator transaction binding the contract method 0xd0e30db0. 
+// +// Solidity: function deposit() payable returns() +func (_SuperchainWETH *SuperchainWETHSession) Deposit() (*types.Transaction, error) { + return _SuperchainWETH.Contract.Deposit(&_SuperchainWETH.TransactOpts) +} + +// Deposit is a paid mutator transaction binding the contract method 0xd0e30db0. +// +// Solidity: function deposit() payable returns() +func (_SuperchainWETH *SuperchainWETHTransactorSession) Deposit() (*types.Transaction, error) { + return _SuperchainWETH.Contract.Deposit(&_SuperchainWETH.TransactOpts) +} + +// RelayETH is a paid mutator transaction binding the contract method 0x4f0edcc9. +// +// Solidity: function relayETH(address _from, address _to, uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHTransactor) RelayETH(opts *bind.TransactOpts, _from common.Address, _to common.Address, _amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.contract.Transact(opts, "relayETH", _from, _to, _amount) +} + +// RelayETH is a paid mutator transaction binding the contract method 0x4f0edcc9. +// +// Solidity: function relayETH(address _from, address _to, uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHSession) RelayETH(_from common.Address, _to common.Address, _amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.RelayETH(&_SuperchainWETH.TransactOpts, _from, _to, _amount) +} + +// RelayETH is a paid mutator transaction binding the contract method 0x4f0edcc9. +// +// Solidity: function relayETH(address _from, address _to, uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHTransactorSession) RelayETH(_from common.Address, _to common.Address, _amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.RelayETH(&_SuperchainWETH.TransactOpts, _from, _to, _amount) +} + +// SendETH is a paid mutator transaction binding the contract method 0x64a197f3. 
+// +// Solidity: function sendETH(address _to, uint256 _chainId) payable returns(bytes32 msgHash_) +func (_SuperchainWETH *SuperchainWETHTransactor) SendETH(opts *bind.TransactOpts, _to common.Address, _chainId *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.contract.Transact(opts, "sendETH", _to, _chainId) +} + +// SendETH is a paid mutator transaction binding the contract method 0x64a197f3. +// +// Solidity: function sendETH(address _to, uint256 _chainId) payable returns(bytes32 msgHash_) +func (_SuperchainWETH *SuperchainWETHSession) SendETH(_to common.Address, _chainId *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.SendETH(&_SuperchainWETH.TransactOpts, _to, _chainId) +} + +// SendETH is a paid mutator transaction binding the contract method 0x64a197f3. +// +// Solidity: function sendETH(address _to, uint256 _chainId) payable returns(bytes32 msgHash_) +func (_SuperchainWETH *SuperchainWETHTransactorSession) SendETH(_to common.Address, _chainId *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.SendETH(&_SuperchainWETH.TransactOpts, _to, _chainId) +} + +// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. +// +// Solidity: function transfer(address dst, uint256 wad) returns(bool) +func (_SuperchainWETH *SuperchainWETHTransactor) Transfer(opts *bind.TransactOpts, dst common.Address, wad *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.contract.Transact(opts, "transfer", dst, wad) +} + +// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. +// +// Solidity: function transfer(address dst, uint256 wad) returns(bool) +func (_SuperchainWETH *SuperchainWETHSession) Transfer(dst common.Address, wad *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.Transfer(&_SuperchainWETH.TransactOpts, dst, wad) +} + +// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. 
+// +// Solidity: function transfer(address dst, uint256 wad) returns(bool) +func (_SuperchainWETH *SuperchainWETHTransactorSession) Transfer(dst common.Address, wad *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.Transfer(&_SuperchainWETH.TransactOpts, dst, wad) +} + +// TransferFrom is a paid mutator transaction binding the contract method 0x23b872dd. +// +// Solidity: function transferFrom(address src, address dst, uint256 wad) returns(bool) +func (_SuperchainWETH *SuperchainWETHTransactor) TransferFrom(opts *bind.TransactOpts, src common.Address, dst common.Address, wad *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.contract.Transact(opts, "transferFrom", src, dst, wad) +} + +// TransferFrom is a paid mutator transaction binding the contract method 0x23b872dd. +// +// Solidity: function transferFrom(address src, address dst, uint256 wad) returns(bool) +func (_SuperchainWETH *SuperchainWETHSession) TransferFrom(src common.Address, dst common.Address, wad *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.TransferFrom(&_SuperchainWETH.TransactOpts, src, dst, wad) +} + +// TransferFrom is a paid mutator transaction binding the contract method 0x23b872dd. +// +// Solidity: function transferFrom(address src, address dst, uint256 wad) returns(bool) +func (_SuperchainWETH *SuperchainWETHTransactorSession) TransferFrom(src common.Address, dst common.Address, wad *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.TransferFrom(&_SuperchainWETH.TransactOpts, src, dst, wad) +} + +// Withdraw is a paid mutator transaction binding the contract method 0x2e1a7d4d. 
+// +// Solidity: function withdraw(uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHTransactor) Withdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.contract.Transact(opts, "withdraw", _amount) +} + +// Withdraw is a paid mutator transaction binding the contract method 0x2e1a7d4d. +// +// Solidity: function withdraw(uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHSession) Withdraw(_amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.Withdraw(&_SuperchainWETH.TransactOpts, _amount) +} + +// Withdraw is a paid mutator transaction binding the contract method 0x2e1a7d4d. +// +// Solidity: function withdraw(uint256 _amount) returns() +func (_SuperchainWETH *SuperchainWETHTransactorSession) Withdraw(_amount *big.Int) (*types.Transaction, error) { + return _SuperchainWETH.Contract.Withdraw(&_SuperchainWETH.TransactOpts, _amount) +} + +// Fallback is a paid mutator transaction binding the contract fallback function. +// +// Solidity: fallback() payable returns() +func (_SuperchainWETH *SuperchainWETHTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _SuperchainWETH.contract.RawTransact(opts, calldata) +} + +// Fallback is a paid mutator transaction binding the contract fallback function. +// +// Solidity: fallback() payable returns() +func (_SuperchainWETH *SuperchainWETHSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _SuperchainWETH.Contract.Fallback(&_SuperchainWETH.TransactOpts, calldata) +} + +// Fallback is a paid mutator transaction binding the contract fallback function. 
+// +// Solidity: fallback() payable returns() +func (_SuperchainWETH *SuperchainWETHTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _SuperchainWETH.Contract.Fallback(&_SuperchainWETH.TransactOpts, calldata) +} + +// Receive is a paid mutator transaction binding the contract receive function. +// +// Solidity: receive() payable returns() +func (_SuperchainWETH *SuperchainWETHTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _SuperchainWETH.contract.RawTransact(opts, nil) // calldata is disallowed for receive function +} + +// Receive is a paid mutator transaction binding the contract receive function. +// +// Solidity: receive() payable returns() +func (_SuperchainWETH *SuperchainWETHSession) Receive() (*types.Transaction, error) { + return _SuperchainWETH.Contract.Receive(&_SuperchainWETH.TransactOpts) +} + +// Receive is a paid mutator transaction binding the contract receive function. +// +// Solidity: receive() payable returns() +func (_SuperchainWETH *SuperchainWETHTransactorSession) Receive() (*types.Transaction, error) { + return _SuperchainWETH.Contract.Receive(&_SuperchainWETH.TransactOpts) +} + +// SuperchainWETHApprovalIterator is returned from FilterApproval and is used to iterate over the raw logs and unpacked data for Approval events raised by the SuperchainWETH contract. 
+type SuperchainWETHApprovalIterator struct { + Event *SuperchainWETHApproval // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *SuperchainWETHApprovalIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHApproval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHApproval) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *SuperchainWETHApprovalIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *SuperchainWETHApprovalIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// SuperchainWETHApproval represents a Approval event raised by the SuperchainWETH contract. +type SuperchainWETHApproval struct { + Src common.Address + Guy common.Address + Wad *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterApproval is a free log retrieval operation binding the contract event 0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925. +// +// Solidity: event Approval(address indexed src, address indexed guy, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) FilterApproval(opts *bind.FilterOpts, src []common.Address, guy []common.Address) (*SuperchainWETHApprovalIterator, error) { + + var srcRule []interface{} + for _, srcItem := range src { + srcRule = append(srcRule, srcItem) + } + var guyRule []interface{} + for _, guyItem := range guy { + guyRule = append(guyRule, guyItem) + } + + logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "Approval", srcRule, guyRule) + if err != nil { + return nil, err + } + return &SuperchainWETHApprovalIterator{contract: _SuperchainWETH.contract, event: "Approval", logs: logs, sub: sub}, nil +} + +// WatchApproval is a free log subscription operation binding the contract event 0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925. 
+// +// Solidity: event Approval(address indexed src, address indexed guy, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) WatchApproval(opts *bind.WatchOpts, sink chan<- *SuperchainWETHApproval, src []common.Address, guy []common.Address) (event.Subscription, error) { + + var srcRule []interface{} + for _, srcItem := range src { + srcRule = append(srcRule, srcItem) + } + var guyRule []interface{} + for _, guyItem := range guy { + guyRule = append(guyRule, guyItem) + } + + logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "Approval", srcRule, guyRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(SuperchainWETHApproval) + if err := _SuperchainWETH.contract.UnpackLog(event, "Approval", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseApproval is a log parse operation binding the contract event 0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925. +// +// Solidity: event Approval(address indexed src, address indexed guy, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) ParseApproval(log types.Log) (*SuperchainWETHApproval, error) { + event := new(SuperchainWETHApproval) + if err := _SuperchainWETH.contract.UnpackLog(event, "Approval", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// SuperchainWETHCrosschainBurnIterator is returned from FilterCrosschainBurn and is used to iterate over the raw logs and unpacked data for CrosschainBurn events raised by the SuperchainWETH contract. 
+type SuperchainWETHCrosschainBurnIterator struct { + Event *SuperchainWETHCrosschainBurn // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *SuperchainWETHCrosschainBurnIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHCrosschainBurn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHCrosschainBurn) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *SuperchainWETHCrosschainBurnIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *SuperchainWETHCrosschainBurnIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// SuperchainWETHCrosschainBurn represents a CrosschainBurn event raised by the SuperchainWETH contract. +type SuperchainWETHCrosschainBurn struct { + From common.Address + Amount *big.Int + Sender common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterCrosschainBurn is a free log retrieval operation binding the contract event 0xb90795a66650155983e242cac3e1ac1a4dc26f8ed2987f3ce416a34e00111fd4. +// +// Solidity: event CrosschainBurn(address indexed from, uint256 amount, address indexed sender) +func (_SuperchainWETH *SuperchainWETHFilterer) FilterCrosschainBurn(opts *bind.FilterOpts, from []common.Address, sender []common.Address) (*SuperchainWETHCrosschainBurnIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "CrosschainBurn", fromRule, senderRule) + if err != nil { + return nil, err + } + return &SuperchainWETHCrosschainBurnIterator{contract: _SuperchainWETH.contract, event: "CrosschainBurn", logs: logs, sub: sub}, nil +} + +// WatchCrosschainBurn is a free log subscription operation binding the contract event 0xb90795a66650155983e242cac3e1ac1a4dc26f8ed2987f3ce416a34e00111fd4. 
+// +// Solidity: event CrosschainBurn(address indexed from, uint256 amount, address indexed sender) +func (_SuperchainWETH *SuperchainWETHFilterer) WatchCrosschainBurn(opts *bind.WatchOpts, sink chan<- *SuperchainWETHCrosschainBurn, from []common.Address, sender []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "CrosschainBurn", fromRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(SuperchainWETHCrosschainBurn) + if err := _SuperchainWETH.contract.UnpackLog(event, "CrosschainBurn", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseCrosschainBurn is a log parse operation binding the contract event 0xb90795a66650155983e242cac3e1ac1a4dc26f8ed2987f3ce416a34e00111fd4. 
+// +// Solidity: event CrosschainBurn(address indexed from, uint256 amount, address indexed sender) +func (_SuperchainWETH *SuperchainWETHFilterer) ParseCrosschainBurn(log types.Log) (*SuperchainWETHCrosschainBurn, error) { + event := new(SuperchainWETHCrosschainBurn) + if err := _SuperchainWETH.contract.UnpackLog(event, "CrosschainBurn", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// SuperchainWETHCrosschainMintIterator is returned from FilterCrosschainMint and is used to iterate over the raw logs and unpacked data for CrosschainMint events raised by the SuperchainWETH contract. +type SuperchainWETHCrosschainMintIterator struct { + Event *SuperchainWETHCrosschainMint // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *SuperchainWETHCrosschainMintIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHCrosschainMint) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHCrosschainMint) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *SuperchainWETHCrosschainMintIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *SuperchainWETHCrosschainMintIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// SuperchainWETHCrosschainMint represents a CrosschainMint event raised by the SuperchainWETH contract. +type SuperchainWETHCrosschainMint struct { + To common.Address + Amount *big.Int + Sender common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterCrosschainMint is a free log retrieval operation binding the contract event 0xde22baff038e3a3e08407cbdf617deed74e869a7ba517df611e33131c6e6ea04. 
+// +// Solidity: event CrosschainMint(address indexed to, uint256 amount, address indexed sender) +func (_SuperchainWETH *SuperchainWETHFilterer) FilterCrosschainMint(opts *bind.FilterOpts, to []common.Address, sender []common.Address) (*SuperchainWETHCrosschainMintIterator, error) { + + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "CrosschainMint", toRule, senderRule) + if err != nil { + return nil, err + } + return &SuperchainWETHCrosschainMintIterator{contract: _SuperchainWETH.contract, event: "CrosschainMint", logs: logs, sub: sub}, nil +} + +// WatchCrosschainMint is a free log subscription operation binding the contract event 0xde22baff038e3a3e08407cbdf617deed74e869a7ba517df611e33131c6e6ea04. +// +// Solidity: event CrosschainMint(address indexed to, uint256 amount, address indexed sender) +func (_SuperchainWETH *SuperchainWETHFilterer) WatchCrosschainMint(opts *bind.WatchOpts, sink chan<- *SuperchainWETHCrosschainMint, to []common.Address, sender []common.Address) (event.Subscription, error) { + + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + + logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "CrosschainMint", toRule, senderRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(SuperchainWETHCrosschainMint) + if err := _SuperchainWETH.contract.UnpackLog(event, "CrosschainMint", log); err != nil { + return err + } + event.Raw = log + + select { + case sink 
<- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseCrosschainMint is a log parse operation binding the contract event 0xde22baff038e3a3e08407cbdf617deed74e869a7ba517df611e33131c6e6ea04. +// +// Solidity: event CrosschainMint(address indexed to, uint256 amount, address indexed sender) +func (_SuperchainWETH *SuperchainWETHFilterer) ParseCrosschainMint(log types.Log) (*SuperchainWETHCrosschainMint, error) { + event := new(SuperchainWETHCrosschainMint) + if err := _SuperchainWETH.contract.UnpackLog(event, "CrosschainMint", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// SuperchainWETHDepositIterator is returned from FilterDeposit and is used to iterate over the raw logs and unpacked data for Deposit events raised by the SuperchainWETH contract. +type SuperchainWETHDepositIterator struct { + Event *SuperchainWETHDeposit // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *SuperchainWETHDepositIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHDeposit) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHDeposit) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *SuperchainWETHDepositIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *SuperchainWETHDepositIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// SuperchainWETHDeposit represents a Deposit event raised by the SuperchainWETH contract. +type SuperchainWETHDeposit struct { + Dst common.Address + Wad *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterDeposit is a free log retrieval operation binding the contract event 0xe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c. 
+// +// Solidity: event Deposit(address indexed dst, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) FilterDeposit(opts *bind.FilterOpts, dst []common.Address) (*SuperchainWETHDepositIterator, error) { + + var dstRule []interface{} + for _, dstItem := range dst { + dstRule = append(dstRule, dstItem) + } + + logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "Deposit", dstRule) + if err != nil { + return nil, err + } + return &SuperchainWETHDepositIterator{contract: _SuperchainWETH.contract, event: "Deposit", logs: logs, sub: sub}, nil +} + +// WatchDeposit is a free log subscription operation binding the contract event 0xe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c. +// +// Solidity: event Deposit(address indexed dst, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) WatchDeposit(opts *bind.WatchOpts, sink chan<- *SuperchainWETHDeposit, dst []common.Address) (event.Subscription, error) { + + var dstRule []interface{} + for _, dstItem := range dst { + dstRule = append(dstRule, dstItem) + } + + logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "Deposit", dstRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(SuperchainWETHDeposit) + if err := _SuperchainWETH.contract.UnpackLog(event, "Deposit", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseDeposit is a log parse operation binding the contract event 0xe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c. 
+// +// Solidity: event Deposit(address indexed dst, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) ParseDeposit(log types.Log) (*SuperchainWETHDeposit, error) { + event := new(SuperchainWETHDeposit) + if err := _SuperchainWETH.contract.UnpackLog(event, "Deposit", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// SuperchainWETHRelayETHIterator is returned from FilterRelayETH and is used to iterate over the raw logs and unpacked data for RelayETH events raised by the SuperchainWETH contract. +type SuperchainWETHRelayETHIterator struct { + Event *SuperchainWETHRelayETH // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *SuperchainWETHRelayETHIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHRelayETH) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHRelayETH) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *SuperchainWETHRelayETHIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *SuperchainWETHRelayETHIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// SuperchainWETHRelayETH represents a RelayETH event raised by the SuperchainWETH contract. +type SuperchainWETHRelayETH struct { + From common.Address + To common.Address + Amount *big.Int + Source *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterRelayETH is a free log retrieval operation binding the contract event 0xe5479bb8ebad3b9ac81f55f424a6289cf0a54ff2641708f41dcb2b26f264d359. 
+// +// Solidity: event RelayETH(address indexed from, address indexed to, uint256 amount, uint256 source) +func (_SuperchainWETH *SuperchainWETHFilterer) FilterRelayETH(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*SuperchainWETHRelayETHIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "RelayETH", fromRule, toRule) + if err != nil { + return nil, err + } + return &SuperchainWETHRelayETHIterator{contract: _SuperchainWETH.contract, event: "RelayETH", logs: logs, sub: sub}, nil +} + +// WatchRelayETH is a free log subscription operation binding the contract event 0xe5479bb8ebad3b9ac81f55f424a6289cf0a54ff2641708f41dcb2b26f264d359. +// +// Solidity: event RelayETH(address indexed from, address indexed to, uint256 amount, uint256 source) +func (_SuperchainWETH *SuperchainWETHFilterer) WatchRelayETH(opts *bind.WatchOpts, sink chan<- *SuperchainWETHRelayETH, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "RelayETH", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(SuperchainWETHRelayETH) + if err := _SuperchainWETH.contract.UnpackLog(event, "RelayETH", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + 
case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseRelayETH is a log parse operation binding the contract event 0xe5479bb8ebad3b9ac81f55f424a6289cf0a54ff2641708f41dcb2b26f264d359. +// +// Solidity: event RelayETH(address indexed from, address indexed to, uint256 amount, uint256 source) +func (_SuperchainWETH *SuperchainWETHFilterer) ParseRelayETH(log types.Log) (*SuperchainWETHRelayETH, error) { + event := new(SuperchainWETHRelayETH) + if err := _SuperchainWETH.contract.UnpackLog(event, "RelayETH", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// SuperchainWETHSendETHIterator is returned from FilterSendETH and is used to iterate over the raw logs and unpacked data for SendETH events raised by the SuperchainWETH contract. +type SuperchainWETHSendETHIterator struct { + Event *SuperchainWETHSendETH // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *SuperchainWETHSendETHIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHSendETH) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHSendETH) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *SuperchainWETHSendETHIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *SuperchainWETHSendETHIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// SuperchainWETHSendETH represents a SendETH event raised by the SuperchainWETH contract. +type SuperchainWETHSendETH struct { + From common.Address + To common.Address + Amount *big.Int + Destination *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterSendETH is a free log retrieval operation binding the contract event 0xed98a2ff78833375c368471a747cdf0633024dde3f870feb08a934ac5be83402. 
+// +// Solidity: event SendETH(address indexed from, address indexed to, uint256 amount, uint256 destination) +func (_SuperchainWETH *SuperchainWETHFilterer) FilterSendETH(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*SuperchainWETHSendETHIterator, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "SendETH", fromRule, toRule) + if err != nil { + return nil, err + } + return &SuperchainWETHSendETHIterator{contract: _SuperchainWETH.contract, event: "SendETH", logs: logs, sub: sub}, nil +} + +// WatchSendETH is a free log subscription operation binding the contract event 0xed98a2ff78833375c368471a747cdf0633024dde3f870feb08a934ac5be83402. +// +// Solidity: event SendETH(address indexed from, address indexed to, uint256 amount, uint256 destination) +func (_SuperchainWETH *SuperchainWETHFilterer) WatchSendETH(opts *bind.WatchOpts, sink chan<- *SuperchainWETHSendETH, from []common.Address, to []common.Address) (event.Subscription, error) { + + var fromRule []interface{} + for _, fromItem := range from { + fromRule = append(fromRule, fromItem) + } + var toRule []interface{} + for _, toItem := range to { + toRule = append(toRule, toItem) + } + + logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "SendETH", fromRule, toRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(SuperchainWETHSendETH) + if err := _SuperchainWETH.contract.UnpackLog(event, "SendETH", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case 
err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseSendETH is a log parse operation binding the contract event 0xed98a2ff78833375c368471a747cdf0633024dde3f870feb08a934ac5be83402. +// +// Solidity: event SendETH(address indexed from, address indexed to, uint256 amount, uint256 destination) +func (_SuperchainWETH *SuperchainWETHFilterer) ParseSendETH(log types.Log) (*SuperchainWETHSendETH, error) { + event := new(SuperchainWETHSendETH) + if err := _SuperchainWETH.contract.UnpackLog(event, "SendETH", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// SuperchainWETHTransferIterator is returned from FilterTransfer and is used to iterate over the raw logs and unpacked data for Transfer events raised by the SuperchainWETH contract. +type SuperchainWETHTransferIterator struct { + Event *SuperchainWETHTransfer // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *SuperchainWETHTransferIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHTransfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHTransfer) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *SuperchainWETHTransferIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *SuperchainWETHTransferIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// SuperchainWETHTransfer represents a Transfer event raised by the SuperchainWETH contract. +type SuperchainWETHTransfer struct { + Src common.Address + Dst common.Address + Wad *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterTransfer is a free log retrieval operation binding the contract event 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef. 
+// +// Solidity: event Transfer(address indexed src, address indexed dst, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) FilterTransfer(opts *bind.FilterOpts, src []common.Address, dst []common.Address) (*SuperchainWETHTransferIterator, error) { + + var srcRule []interface{} + for _, srcItem := range src { + srcRule = append(srcRule, srcItem) + } + var dstRule []interface{} + for _, dstItem := range dst { + dstRule = append(dstRule, dstItem) + } + + logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "Transfer", srcRule, dstRule) + if err != nil { + return nil, err + } + return &SuperchainWETHTransferIterator{contract: _SuperchainWETH.contract, event: "Transfer", logs: logs, sub: sub}, nil +} + +// WatchTransfer is a free log subscription operation binding the contract event 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef. +// +// Solidity: event Transfer(address indexed src, address indexed dst, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *SuperchainWETHTransfer, src []common.Address, dst []common.Address) (event.Subscription, error) { + + var srcRule []interface{} + for _, srcItem := range src { + srcRule = append(srcRule, srcItem) + } + var dstRule []interface{} + for _, dstItem := range dst { + dstRule = append(dstRule, dstItem) + } + + logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "Transfer", srcRule, dstRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(SuperchainWETHTransfer) + if err := _SuperchainWETH.contract.UnpackLog(event, "Transfer", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + 
case <-quit: + return nil + } + } + }), nil +} + +// ParseTransfer is a log parse operation binding the contract event 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef. +// +// Solidity: event Transfer(address indexed src, address indexed dst, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) ParseTransfer(log types.Log) (*SuperchainWETHTransfer, error) { + event := new(SuperchainWETHTransfer) + if err := _SuperchainWETH.contract.UnpackLog(event, "Transfer", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// SuperchainWETHWithdrawalIterator is returned from FilterWithdrawal and is used to iterate over the raw logs and unpacked data for Withdrawal events raised by the SuperchainWETH contract. +type SuperchainWETHWithdrawalIterator struct { + Event *SuperchainWETHWithdrawal // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *SuperchainWETHWithdrawalIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHWithdrawal) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(SuperchainWETHWithdrawal) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *SuperchainWETHWithdrawalIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *SuperchainWETHWithdrawalIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// SuperchainWETHWithdrawal represents a Withdrawal event raised by the SuperchainWETH contract. +type SuperchainWETHWithdrawal struct { + Src common.Address + Wad *big.Int + Raw types.Log // Blockchain specific contextual infos +} + +// FilterWithdrawal is a free log retrieval operation binding the contract event 0x7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65. 
+// +// Solidity: event Withdrawal(address indexed src, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) FilterWithdrawal(opts *bind.FilterOpts, src []common.Address) (*SuperchainWETHWithdrawalIterator, error) { + + var srcRule []interface{} + for _, srcItem := range src { + srcRule = append(srcRule, srcItem) + } + + logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "Withdrawal", srcRule) + if err != nil { + return nil, err + } + return &SuperchainWETHWithdrawalIterator{contract: _SuperchainWETH.contract, event: "Withdrawal", logs: logs, sub: sub}, nil +} + +// WatchWithdrawal is a free log subscription operation binding the contract event 0x7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65. +// +// Solidity: event Withdrawal(address indexed src, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) WatchWithdrawal(opts *bind.WatchOpts, sink chan<- *SuperchainWETHWithdrawal, src []common.Address) (event.Subscription, error) { + + var srcRule []interface{} + for _, srcItem := range src { + srcRule = append(srcRule, srcItem) + } + + logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "Withdrawal", srcRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(SuperchainWETHWithdrawal) + if err := _SuperchainWETH.contract.UnpackLog(event, "Withdrawal", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseWithdrawal is a log parse operation binding the contract event 0x7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65. 
+// +// Solidity: event Withdrawal(address indexed src, uint256 wad) +func (_SuperchainWETH *SuperchainWETHFilterer) ParseWithdrawal(log types.Log) (*SuperchainWETHWithdrawal, error) { + event := new(SuperchainWETHWithdrawal) + if err := _SuperchainWETH.contract.UnpackLog(event, "Withdrawal", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} diff --git a/devnet-sdk/contracts/constants/constants.go b/devnet-sdk/contracts/constants/constants.go new file mode 100644 index 00000000000..7e3b69ef6a6 --- /dev/null +++ b/devnet-sdk/contracts/constants/constants.go @@ -0,0 +1,15 @@ +package constants + +import "github.com/ethereum-optimism/optimism/devnet-sdk/types" + +const ( + L2ToL2CrossDomainMessenger types.Address = "0x4200000000000000000000000000000000000023" + SuperchainWETH types.Address = "0x4200000000000000000000000000000000000024" + ETHLiquidity types.Address = "0x4200000000000000000000000000000000000025" + SuperchainTokenBridge types.Address = "0x4200000000000000000000000000000000000028" +) + +const ( + ETH = 1e18 + Gwei = 1e9 +) diff --git a/devnet-sdk/contracts/contracts.go b/devnet-sdk/contracts/contracts.go new file mode 100644 index 00000000000..ad2e55dc2d4 --- /dev/null +++ b/devnet-sdk/contracts/contracts.go @@ -0,0 +1,17 @@ +package contracts + +import ( + "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/registry/client" + "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/registry/empty" + "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" + "github.com/ethereum/go-ethereum/ethclient" +) + +// NewClientRegistry creates a new Registry that uses the provided client +func NewClientRegistry(c *ethclient.Client) interfaces.ContractsRegistry { + return &client.ClientRegistry{Client: c} +} + +func NewEmptyRegistry() interfaces.ContractsRegistry { + return &empty.EmptyRegistry{} +} diff --git a/devnet-sdk/contracts/registry/client/client.go b/devnet-sdk/contracts/registry/client/client.go new file 
mode 100644 index 00000000000..4f6bc3308a4 --- /dev/null +++ b/devnet-sdk/contracts/registry/client/client.go @@ -0,0 +1,30 @@ +package client + +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" + "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" + "github.com/ethereum-optimism/optimism/devnet-sdk/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" +) + +// ClientRegistry is a Registry implementation that uses an ethclient.Client +type ClientRegistry struct { + Client *ethclient.Client +} + +var _ interfaces.ContractsRegistry = (*ClientRegistry)(nil) + +func (r *ClientRegistry) SuperchainWETH(address types.Address) (interfaces.SuperchainWETH, error) { + binding, err := bindings.NewSuperchainWETH(common.HexToAddress(string(address)), r.Client) + if err != nil { + return nil, fmt.Errorf("failed to create SuperchainWETH binding: %w", err) + } + return &superchainWETHBinding{ + contractAddress: address, + client: r.Client, + binding: binding, + }, nil +} diff --git a/devnet-sdk/contracts/registry/client/superchainweth.go b/devnet-sdk/contracts/registry/client/superchainweth.go new file mode 100644 index 00000000000..d4860ca3450 --- /dev/null +++ b/devnet-sdk/contracts/registry/client/superchainweth.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + + "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" + "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" + "github.com/ethereum-optimism/optimism/devnet-sdk/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" +) + +type superchainWETHBinding struct { + contractAddress types.Address + client *ethclient.Client + binding *bindings.SuperchainWETH +} + +var _ interfaces.SuperchainWETH = (*superchainWETHBinding)(nil) + +func (b *superchainWETHBinding) BalanceOf(addr types.Address) types.ReadInvocation[types.Balance] { + return 
&superchainWETHBalanceOfImpl{ + contract: b, + addr: addr, + } +} + +type superchainWETHBalanceOfImpl struct { + contract *superchainWETHBinding + addr types.Address +} + +func (i *superchainWETHBalanceOfImpl) Call(ctx context.Context) (types.Balance, error) { + balance, err := i.contract.binding.BalanceOf(nil, common.HexToAddress(string(i.addr))) + if err != nil { + return types.Balance{}, err + } + return types.NewBalance(balance), nil +} diff --git a/devnet-sdk/contracts/registry/empty/empty.go b/devnet-sdk/contracts/registry/empty/empty.go new file mode 100644 index 00000000000..7aca8152227 --- /dev/null +++ b/devnet-sdk/contracts/registry/empty/empty.go @@ -0,0 +1,18 @@ +package empty + +import ( + "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" + "github.com/ethereum-optimism/optimism/devnet-sdk/types" +) + +// EmptyRegistry represents a registry that returns not found errors for all contract accesses +type EmptyRegistry struct{} + +var _ interfaces.ContractsRegistry = (*EmptyRegistry)(nil) + +func (r *EmptyRegistry) SuperchainWETH(address types.Address) (interfaces.SuperchainWETH, error) { + return nil, &interfaces.ErrContractNotFound{ + ContractType: "SuperchainWETH", + Address: address, + } +} diff --git a/devnet-sdk/interfaces/registry.go b/devnet-sdk/interfaces/registry.go new file mode 100644 index 00000000000..5d97484e7f4 --- /dev/null +++ b/devnet-sdk/interfaces/registry.go @@ -0,0 +1,27 @@ +package interfaces + +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/devnet-sdk/types" +) + +// ErrContractNotFound indicates that a contract is not available at the requested address +type ErrContractNotFound struct { + ContractType string + Address types.Address +} + +func (e *ErrContractNotFound) Error() string { + return fmt.Sprintf("%s contract not found at %s", e.ContractType, e.Address) +} + +// ContractsRegistry provides access to all supported contract instances +type ContractsRegistry interface { + SuperchainWETH(address 
types.Address) (SuperchainWETH, error) +} + +// SuperchainWETH represents the interface for interacting with the SuperchainWETH contract +type SuperchainWETH interface { + BalanceOf(user types.Address) types.ReadInvocation[types.Balance] +} diff --git a/devnet-sdk/shell/env/chain.go b/devnet-sdk/shell/env/chain.go index 8fcf4c50667..c07040baec6 100644 --- a/devnet-sdk/shell/env/chain.go +++ b/devnet-sdk/shell/env/chain.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "html/template" + "path/filepath" "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" ) @@ -88,7 +89,11 @@ func (c *ChainConfig) GetEnv() (*ChainEnv, error) { } // To allow commands within the shell to know which devnet and chain they are in - envVars[EnvFileVar] = c.devnetFile + absPath, err := filepath.Abs(c.devnetFile) + if err != nil { + absPath = c.devnetFile // Fallback to original path if abs fails + } + envVars[EnvFileVar] = absPath envVars[ChainNameVar] = c.name return &ChainEnv{ diff --git a/devnet-sdk/shell/env/env_test.go b/devnet-sdk/shell/env/env_test.go index f7f0510d444..a77773fa61c 100644 --- a/devnet-sdk/shell/env/env_test.go +++ b/devnet-sdk/shell/env/env_test.go @@ -189,7 +189,7 @@ func TestChainConfig(t *testing.T) { assert.Equal(t, "http://localhost:8545", env.EnvVars["ETH_RPC_URL"]) assert.Equal(t, "1234", env.EnvVars["ETH_RPC_JWT_SECRET"]) - assert.Equal(t, "test.json", env.EnvVars[EnvFileVar]) + assert.Equal(t, "test.json", filepath.Base(env.EnvVars[EnvFileVar])) assert.Equal(t, "test", env.EnvVars[ChainNameVar]) assert.Contains(t, env.Motd, "deployer") assert.Contains(t, env.Motd, "0x123") diff --git a/devnet-sdk/system/chain.go b/devnet-sdk/system/chain.go new file mode 100644 index 00000000000..5114fa22789 --- /dev/null +++ b/devnet-sdk/system/chain.go @@ -0,0 +1,142 @@ +package system + +import ( + "context" + "fmt" + "strconv" + "sync" + + "github.com/ethereum-optimism/optimism/devnet-sdk/constraints" + "github.com/ethereum-optimism/optimism/devnet-sdk/contracts" + 
"github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" + "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" + "github.com/ethereum-optimism/optimism/devnet-sdk/types" + "github.com/ethereum/go-ethereum/ethclient" +) + +// clientManager handles ethclient connections +type clientManager struct { + mu sync.RWMutex + clients map[string]*ethclient.Client +} + +func newClientManager() *clientManager { + return &clientManager{ + clients: make(map[string]*ethclient.Client), + } +} + +func (m *clientManager) getClient(rpcURL string) (*ethclient.Client, error) { + m.mu.RLock() + if client, ok := m.clients[rpcURL]; ok { + m.mu.RUnlock() + return client, nil + } + m.mu.RUnlock() + + m.mu.Lock() + defer m.mu.Unlock() + + // Double-check after acquiring write lock + if client, ok := m.clients[rpcURL]; ok { + return client, nil + } + + client, err := ethclient.Dial(rpcURL) + if err != nil { + return nil, fmt.Errorf("failed to connect to ethereum client: %w", err) + } + m.clients[rpcURL] = client + return client, nil +} + +type chain struct { + id string + rpcUrl string + + users map[string]types.Wallet + clients *clientManager + registry interfaces.ContractsRegistry + mu sync.Mutex +} + +func (c *chain) getClient() (*ethclient.Client, error) { + return c.clients.getClient(c.rpcUrl) +} + +func newChain(chainID string, rpcUrl string, users map[string]types.Wallet) *chain { + return &chain{ + id: chainID, + rpcUrl: rpcUrl, + users: users, + clients: newClientManager(), + } +} + +func (c *chain) ContractsRegistry() interfaces.ContractsRegistry { + c.mu.Lock() + defer c.mu.Unlock() + + if c.registry != nil { + return c.registry + } + + client, err := c.getClient() + if err != nil { + return contracts.NewEmptyRegistry() + } + + c.registry = contracts.NewClientRegistry(client) + return c.registry +} + +func (c *chain) RPCURL() string { + return c.rpcUrl +} + +// Wallet returns the first wallet which meets all provided constraints, or an +// error. 
+// Typically this will be one of the pre-funded wallets associated with +// the deployed system. +func (c *chain) Wallet(ctx context.Context, constraints ...constraints.WalletConstraint) (types.Wallet, error) { + // Try each user + for _, user := range c.users { + // Check all constraints + meetsAll := true + for _, constraint := range constraints { + if !constraint.CheckWallet(user) { + meetsAll = false + break + } + } + if meetsAll { + return user, nil + } + } + + return nil, fmt.Errorf("no user found meeting all constraints") +} + +func (c *chain) ID() types.ChainID { + if c.id == "" { + return types.ChainID(0) + } + id, _ := strconv.ParseUint(c.id, 10, 64) + return types.ChainID(id) +} + +func chainFromDescriptor(d *descriptors.Chain) Chain { + // TODO: handle incorrect descriptors better. We could panic here. + firstNodeRPC := d.Nodes[0].Services["el"].Endpoints["rpc"] + rpcURL := fmt.Sprintf("http://%s:%d", firstNodeRPC.Host, firstNodeRPC.Port) + + c := newChain(d.ID, rpcURL, nil) // Create chain first + + users := make(map[string]types.Wallet) + for key, w := range d.Wallets { + users[key] = newWallet(w.PrivateKey, types.Address(w.Address), c) + } + c.users = users // Set users after creation + + return c +} diff --git a/devnet-sdk/system/interfaces.go b/devnet-sdk/system/interfaces.go new file mode 100644 index 00000000000..6b1950fade3 --- /dev/null +++ b/devnet-sdk/system/interfaces.go @@ -0,0 +1,36 @@ +package system + +import ( + "context" + + "github.com/ethereum-optimism/optimism/devnet-sdk/constraints" + "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" + "github.com/ethereum-optimism/optimism/devnet-sdk/types" +) + +// System represents a complete Optimism system with L1 and L2 chains +type System interface { + Identifier() string + L1() Chain + // TODO: fix the chain ID type + L2(uint64) Chain +} + +// Chain represents an Ethereum chain (L1 or L2) +type Chain interface { + RPCURL() string + ID() types.ChainID + Wallet(ctx 
context.Context, constraints ...constraints.WalletConstraint) (types.Wallet, error) + ContractsRegistry() interfaces.ContractsRegistry +} + +// InteropSystem extends System with interoperability features +type InteropSystem interface { + System + InteropSet() InteropSet +} + +// InteropSet provides access to L2 chains in an interop environment +type InteropSet interface { + L2(uint64) Chain +} diff --git a/devnet-sdk/system/system.go b/devnet-sdk/system/system.go new file mode 100644 index 00000000000..b431c8607c0 --- /dev/null +++ b/devnet-sdk/system/system.go @@ -0,0 +1,107 @@ +package system + +import ( + "encoding/json" + "fmt" + "os" + "slices" + "strings" + + "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" +) + +type system struct { + identifier string + l1 Chain + l2s []Chain +} + +// system implements System +var _ System = (*system)(nil) + +func NewSystemFromEnv(envVar string) (System, error) { + devnetFile := os.Getenv(envVar) + if devnetFile == "" { + return nil, fmt.Errorf("env var '%s' is unset", envVar) + } + devnet, err := devnetFromFile(devnetFile) + if err != nil { + return nil, fmt.Errorf("failed to parse devnet file: %w", err) + } + + // Extract basename without extension from devnetFile path + basename := devnetFile + if lastSlash := strings.LastIndex(basename, "/"); lastSlash >= 0 { + basename = basename[lastSlash+1:] + } + if lastDot := strings.LastIndex(basename, "."); lastDot >= 0 { + basename = basename[:lastDot] + } + + sys, err := systemFromDevnet(*devnet, basename) + if err != nil { + return nil, fmt.Errorf("failed to create system from devnet file: %w", err) + } + return sys, nil +} + +func (s *system) L1() Chain { + return s.l1 +} + +func (s *system) L2(chainID uint64) Chain { + return s.l2s[chainID] +} + +func (s *system) Identifier() string { + return s.identifier +} + +func (s *system) addChains(chains ...*descriptors.Chain) error { + for _, chainDesc := range chains { + if chainDesc.ID == "" { + s.l1 = 
chainFromDescriptor(chainDesc) + } else { + s.l2s = append(s.l2s, chainFromDescriptor(chainDesc)) + } + } + return nil +} + +// devnetFromFile reads a DevnetEnvironment from a JSON file. +func devnetFromFile(devnetFile string) (*descriptors.DevnetEnvironment, error) { + data, err := os.ReadFile(devnetFile) + if err != nil { + return nil, fmt.Errorf("error reading devnet file: %w", err) + } + + var config descriptors.DevnetEnvironment + if err := json.Unmarshal(data, &config); err != nil { + return nil, fmt.Errorf("error parsing JSON: %w", err) + } + return &config, nil +} + +func systemFromDevnet(dn descriptors.DevnetEnvironment, identifier string) (System, error) { + sys := &system{identifier: identifier} + + if err := sys.addChains(append(dn.L2, dn.L1)...); err != nil { + return nil, err + } + + if slices.Contains(dn.Features, "interop") { + return &interopSystem{system: sys}, nil + } + return sys, nil +} + +type interopSystem struct { + *system +} + +// interopSystem implements InteropSystem +var _ InteropSystem = (*interopSystem)(nil) + +func (i *interopSystem) InteropSet() InteropSet { + return i.system // TODO: the interop set might not contain all L2s +} diff --git a/devnet-sdk/system/system_test.go b/devnet-sdk/system/system_test.go new file mode 100644 index 00000000000..cd5a7441ace --- /dev/null +++ b/devnet-sdk/system/system_test.go @@ -0,0 +1,282 @@ +package system + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" + "github.com/ethereum-optimism/optimism/devnet-sdk/shell/env" + "github.com/ethereum-optimism/optimism/devnet-sdk/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewSystemFromEnv(t *testing.T) { + // Create a temporary devnet file + tempDir := t.TempDir() + devnetFile := filepath.Join(tempDir, "devnet.json") + + devnet := &descriptors.DevnetEnvironment{ + L1: &descriptors.Chain{ + ID: "1", + 
Nodes: []descriptors.Node{{ + Services: map[string]descriptors.Service{ + "el": { + Name: "geth", + Endpoints: descriptors.EndpointMap{ + "rpc": descriptors.PortInfo{ + Host: "localhost", + Port: 8545, + }, + }, + }, + }, + }}, + Wallets: descriptors.WalletMap{ + "default": descriptors.Wallet{ + Address: "0x123", + PrivateKey: "0xabc", + }, + }, + }, + L2: []*descriptors.Chain{{ + ID: "2", + Nodes: []descriptors.Node{{ + Services: map[string]descriptors.Service{ + "el": { + Name: "geth", + Endpoints: descriptors.EndpointMap{ + "rpc": descriptors.PortInfo{ + Host: "localhost", + Port: 8546, + }, + }, + }, + }, + }}, + Wallets: descriptors.WalletMap{ + "default": descriptors.Wallet{ + Address: "0x123", + PrivateKey: "0xabc", + }, + }, + }}, + Features: []string{}, + } + + data, err := json.Marshal(devnet) + require.NoError(t, err) + require.NoError(t, os.WriteFile(devnetFile, data, 0644)) + + // Test with valid environment + envVar := env.EnvFileVar + os.Setenv(envVar, devnetFile) + sys, err := NewSystemFromEnv(envVar) + assert.NoError(t, err) + assert.NotNil(t, sys) + + // Test with unset environment variable + os.Unsetenv(envVar) + sys, err = NewSystemFromEnv(envVar) + assert.Error(t, err) + assert.Nil(t, sys) +} + +func TestSystemFromDevnet(t *testing.T) { + testNode := descriptors.Node{ + Services: map[string]descriptors.Service{ + "el": { + Name: "geth", + Endpoints: descriptors.EndpointMap{ + "rpc": descriptors.PortInfo{ + Host: "localhost", + Port: 8545, + }, + }, + }, + }, + } + + testWallet := descriptors.Wallet{ + Address: "0x123", + PrivateKey: "0xabc", + } + + tests := []struct { + name string + devnet descriptors.DevnetEnvironment + wantErr bool + isInterop bool + }{ + { + name: "basic system", + devnet: descriptors.DevnetEnvironment{ + L1: &descriptors.Chain{ + ID: "1", + Nodes: []descriptors.Node{testNode}, + Wallets: descriptors.WalletMap{ + "default": testWallet, + }, + }, + L2: []*descriptors.Chain{{ + ID: "2", + Nodes: []descriptors.Node{testNode}, 
+ Wallets: descriptors.WalletMap{ + "default": testWallet, + }, + }}, + }, + wantErr: false, + isInterop: false, + }, + { + name: "interop system", + devnet: descriptors.DevnetEnvironment{ + L1: &descriptors.Chain{ + ID: "1", + Nodes: []descriptors.Node{testNode}, + Wallets: descriptors.WalletMap{ + "default": testWallet, + }, + }, + L2: []*descriptors.Chain{{ + ID: "2", + Nodes: []descriptors.Node{testNode}, + Wallets: descriptors.WalletMap{ + "default": testWallet, + }, + }}, + Features: []string{"interop"}, + }, + wantErr: false, + isInterop: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sys, err := systemFromDevnet(tt.devnet, "test") + if tt.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.NotNil(t, sys) + + _, isInterop := sys.(InteropSystem) + assert.Equal(t, tt.isInterop, isInterop) + }) + } +} + +func TestDevnetFromFile(t *testing.T) { + // Create a temporary devnet file + tempDir := t.TempDir() + validFile := filepath.Join(tempDir, "valid.json") + invalidFile := filepath.Join(tempDir, "invalid.json") + + validDevnet := &descriptors.DevnetEnvironment{ + L1: &descriptors.Chain{ID: "1"}, + L2: []*descriptors.Chain{{ID: "2"}}, + } + + validData, err := json.Marshal(validDevnet) + require.NoError(t, err) + require.NoError(t, os.WriteFile(validFile, validData, 0644)) + + require.NoError(t, os.WriteFile(invalidFile, []byte("invalid json"), 0644)) + + tests := []struct { + name string + file string + wantErr bool + }{ + { + name: "valid file", + file: validFile, + wantErr: false, + }, + { + name: "invalid file", + file: invalidFile, + wantErr: true, + }, + { + name: "non-existent file", + file: "nonexistent.json", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + devnet, err := devnetFromFile(tt.file) + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, devnet) + } else { + assert.NoError(t, err) + assert.NotNil(t, devnet) + } + }) + } +} + 
+func TestWallet(t *testing.T) { + chain := newChain("1", "http://localhost:8545", nil) + + tests := []struct { + name string + privateKey types.Key + address types.Address + wantAddr types.Address + wantPrivKey types.Key + }{ + { + name: "valid wallet", + privateKey: "0xabc", + address: "0x123", + wantAddr: "0x123", + wantPrivKey: "abc", + }, + { + name: "empty wallet", + privateKey: "", + address: "", + wantAddr: "", + wantPrivKey: "", + }, + { + name: "only address", + privateKey: "", + address: "0x456", + wantAddr: "0x456", + wantPrivKey: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + w := newWallet(tt.privateKey, tt.address, chain) + assert.Equal(t, tt.wantAddr, w.Address()) + assert.Equal(t, tt.wantPrivKey, w.PrivateKey()) + }) + } +} + +func TestChainUser(t *testing.T) { + chain := newChain("1", "http://localhost:8545", nil) + testWallet := newWallet("0xabc", "0x123", chain) + chain.users = map[string]types.Wallet{ + "l2Faucet": testWallet, + } + + ctx := context.Background() + user, err := chain.Wallet(ctx) + assert.NoError(t, err) + assert.Equal(t, testWallet.Address(), user.Address()) + assert.Equal(t, testWallet.PrivateKey(), user.PrivateKey()) +} diff --git a/devnet-sdk/system/wallet.go b/devnet-sdk/system/wallet.go new file mode 100644 index 00000000000..e17a4e8267b --- /dev/null +++ b/devnet-sdk/system/wallet.go @@ -0,0 +1,200 @@ +package system + +import ( + "context" + "fmt" + "strings" + + "github.com/ethereum-optimism/optimism/devnet-sdk/types" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + coreTypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" +) + +// internalChain provides access to internal chain functionality +type internalChain interface { + Chain + getClient() (*ethclient.Client, error) +} + +type wallet struct { + privateKey types.Key + address types.Address + 
chain internalChain +} + +func newWallet(pk types.Key, addr types.Address, chain *chain) *wallet { + return &wallet{ + privateKey: pk, + address: addr, + chain: chain, + } +} + +func (w *wallet) PrivateKey() types.Key { + return strings.TrimPrefix(w.privateKey, "0x") +} + +func (w *wallet) Address() types.Address { + return w.address +} + +func (w *wallet) SendETH(to types.Address, amount types.Balance) types.WriteInvocation[any] { + return &sendImpl{ + chain: w.chain, + pk: w.PrivateKey(), + to: to, + amount: amount, + } +} + +func (w *wallet) Balance() types.Balance { + client, err := w.chain.getClient() + if err != nil { + return types.Balance{} + } + + balance, err := client.BalanceAt(context.Background(), common.HexToAddress(string(w.address)), nil) + if err != nil { + return types.Balance{} + } + + return types.NewBalance(balance) +} + +type sendImpl struct { + chain internalChain + pk types.Key + to types.Address + amount types.Balance +} + +func (i *sendImpl) Call(ctx context.Context) (any, error) { + client, err := i.chain.getClient() + if err != nil { + return nil, fmt.Errorf("failed to get client: %w", err) + } + + pk, err := crypto.HexToECDSA(string(i.pk)) + if err != nil { + return nil, fmt.Errorf("invalid private key: %w", err) + } + + from := crypto.PubkeyToAddress(pk.PublicKey) + nonce, err := client.PendingNonceAt(ctx, from) + if err != nil { + return nil, fmt.Errorf("failed to get nonce: %w", err) + } + + gasPrice, err := client.SuggestGasPrice(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get gas price: %w", err) + } + + // TODO: compute an accurate gas limit + gasLimit := uint64(210000) // 10x Standard ETH transfer gas limit + toAddr := common.HexToAddress(string(i.to)) + tx := coreTypes.NewTransaction(nonce, toAddr, i.amount.Int, gasLimit, gasPrice, nil) + + chainID, err := client.NetworkID(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get chain id: %w", err) + } + + signedTx, err := coreTypes.SignTx(tx, 
coreTypes.NewEIP155Signer(chainID), pk) + if err != nil { + return nil, fmt.Errorf("failed to sign transaction: %w", err) + } + + return signedTx, nil +} + +func (i *sendImpl) Send(ctx context.Context) types.InvocationResult { + tx, err := sendETH(ctx, i.chain, i.pk, i.to, i.amount) + return &sendResult{ + chain: i.chain, + tx: tx, + err: err, + } +} + +type sendResult struct { + chain internalChain + tx *coreTypes.Transaction + err error +} + +func (r *sendResult) Error() error { + return r.err +} + +func (r *sendResult) Wait() error { + client, err := r.chain.getClient() + if err != nil { + return fmt.Errorf("failed to get client: %w", err) + } + + if r.err != nil { + return r.err + } + if r.tx == nil { + return fmt.Errorf("no transaction to wait for") + } + + receipt, err := bind.WaitMined(context.Background(), client, r.tx) + if err != nil { + return fmt.Errorf("failed waiting for transaction confirmation: %w", err) + } + + if receipt.Status == 0 { + return fmt.Errorf("transaction failed") + } + + return nil +} + +func sendETH(ctx context.Context, chain internalChain, privateKey string, to types.Address, amount types.Balance) (*coreTypes.Transaction, error) { + client, err := chain.getClient() + if err != nil { + return nil, fmt.Errorf("failed to get client: %w", err) + } + + pk, err := crypto.HexToECDSA(privateKey) + if err != nil { + return nil, fmt.Errorf("invalid private key: %w", err) + } + + from := crypto.PubkeyToAddress(pk.PublicKey) + nonce, err := client.PendingNonceAt(ctx, from) + if err != nil { + return nil, fmt.Errorf("failed to get nonce: %w", err) + } + + gasPrice, err := client.SuggestGasPrice(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get gas price: %w", err) + } + + gasLimit := uint64(210000) // 10x Standard ETH transfer gas limit + toAddr := common.HexToAddress(string(to)) + tx := coreTypes.NewTransaction(nonce, toAddr, amount.Int, gasLimit, gasPrice, nil) + + chainID, err := client.NetworkID(ctx) + if err != nil { + return 
nil, fmt.Errorf("failed to get chain id: %w", err) + } + + signedTx, err := coreTypes.SignTx(tx, coreTypes.NewEIP155Signer(chainID), pk) + if err != nil { + return nil, fmt.Errorf("failed to sign transaction: %w", err) + } + + err = client.SendTransaction(ctx, signedTx) + if err != nil { + return nil, fmt.Errorf("failed to send transaction: %w", err) + } + + return signedTx, nil +} diff --git a/devnet-sdk/testing/systest/provider.go b/devnet-sdk/testing/systest/provider.go new file mode 100644 index 00000000000..5415c438468 --- /dev/null +++ b/devnet-sdk/testing/systest/provider.go @@ -0,0 +1,18 @@ +package systest + +import "github.com/ethereum-optimism/optimism/devnet-sdk/system" + +// systemProvider defines the interface for package-level functionality +type systemProvider interface { + NewSystemFromEnv(string) (system.System, error) +} + +// defaultProvider is the default implementation of the package +type defaultProvider struct{} + +func (p *defaultProvider) NewSystemFromEnv(envVar string) (system.System, error) { + return system.NewSystemFromEnv(envVar) +} + +// currentPackage is the current package implementation +var currentPackage systemProvider = &defaultProvider{} diff --git a/devnet-sdk/testing/systest/systest.go b/devnet-sdk/testing/systest/systest.go new file mode 100644 index 00000000000..09b402e570c --- /dev/null +++ b/devnet-sdk/testing/systest/systest.go @@ -0,0 +1,49 @@ +package systest + +import ( + "context" + + "github.com/ethereum-optimism/optimism/devnet-sdk/shell/env" + "github.com/ethereum-optimism/optimism/devnet-sdk/system" +) + +type PreconditionValidator func(t T, sys system.System) (context.Context, error) + +type SystemTestFunc func(t T, sys system.System) + +func SystemTest(t BasicT, f SystemTestFunc, validators ...PreconditionValidator) { + wt := NewT(t) + wt.Helper() + + ctx, cancel := context.WithCancel(wt.Context()) + defer cancel() + + wt = wt.WithContext(ctx) + + sys, err := currentPackage.NewSystemFromEnv(env.EnvFileVar) + 
if err != nil { + t.Fatalf("failed to parse system from environment: %v", err) + } + + for _, validator := range validators { + ctx, err := validator(wt, sys) + if err != nil { + t.Skipf("validator failed: %v", err) + } + wt = wt.WithContext(ctx) + } + + f(wt, sys) +} + +type InteropSystemTestFunc func(t T, sys system.InteropSystem) + +func InteropSystemTest(t BasicT, f InteropSystemTestFunc, validators ...PreconditionValidator) { + SystemTest(t, func(t T, sys system.System) { + if sys, ok := sys.(system.InteropSystem); ok { + f(t, sys) + } else { + t.Skipf("interop test requested, but system is not an interop system") + } + }, validators...) +} diff --git a/devnet-sdk/testing/systest/tb.go b/devnet-sdk/testing/systest/tb.go new file mode 100644 index 00000000000..3580df5b31b --- /dev/null +++ b/devnet-sdk/testing/systest/tb.go @@ -0,0 +1,62 @@ +package systest + +import ( + "context" + "testing" + "time" +) + +// tbWrapper converts from testingTB to T +type tbWrapper struct { + testingTB + ctx context.Context +} + +var _ T = (*tbWrapper)(nil) + +func (t *tbWrapper) Context() context.Context { + t.Helper() + return t.ctx +} + +func (t *tbWrapper) WithContext(ctx context.Context) T { + t.Helper() + return &tbWrapper{ + testingTB: t.testingTB, + ctx: ctx, + } +} + +func (t *tbWrapper) Deadline() (deadline time.Time, ok bool) { + t.Helper() + if tt, ok := t.testingTB.(*testing.T); ok { + return tt.Deadline() + } + // TODO: get proper deadline + return time.Time{}, false +} + +func (t *tbWrapper) Parallel() { + t.Helper() + if tt, ok := t.testingTB.(*testing.T); ok { + tt.Parallel() + } + // TODO: implement ourselves. 
For now, just run sequentially +} + +func (t *tbWrapper) Run(name string, fn func(t T)) { + t.Helper() + if tt, ok := t.testingTB.(*testing.T); ok { + tt.Run(name, func(t *testing.T) { + fn(NewT(t)) + }) + } else { + // TODO: implement proper sub-tests reporting + done := make(chan struct{}) + go func() { + defer close(done) + fn(NewT(t)) + }() + <-done + } +} diff --git a/devnet-sdk/testing/systest/testing.go b/devnet-sdk/testing/systest/testing.go new file mode 100644 index 00000000000..1677165d66c --- /dev/null +++ b/devnet-sdk/testing/systest/testing.go @@ -0,0 +1,57 @@ +package systest + +import ( + "context" + "time" +) + +type BasicT = testingTB + +type testingTB interface { + Cleanup(func()) + Error(args ...any) + Errorf(format string, args ...any) + Fail() + Failed() bool + FailNow() + Fatal(args ...any) + Fatalf(format string, args ...any) + Helper() + Log(args ...any) + Logf(format string, args ...any) + Name() string + Setenv(key, value string) + Skip(args ...any) + SkipNow() + Skipf(format string, args ...any) + Skipped() bool + TempDir() string +} + +type tContext interface { + Context() context.Context +} + +type T interface { + testingTB + Context() context.Context + WithContext(ctx context.Context) T + Deadline() (deadline time.Time, ok bool) + Parallel() + Run(string, func(t T)) +} + +func NewT(t testingTB) T { + t.Helper() + if tt, ok := t.(T); ok { + return tt + } + ctx := context.TODO() + if tt, ok := t.(tContext); ok { + ctx = tt.Context() + } + return &tbWrapper{ + testingTB: t, + ctx: ctx, + } +} diff --git a/devnet-sdk/testing/systest/testing_test.go b/devnet-sdk/testing/systest/testing_test.go new file mode 100644 index 00000000000..1aa49d9b156 --- /dev/null +++ b/devnet-sdk/testing/systest/testing_test.go @@ -0,0 +1,253 @@ +package systest + +import ( + "context" + "os" + "testing" + + "github.com/ethereum-optimism/optimism/devnet-sdk/constraints" + "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" + 
"github.com/ethereum-optimism/optimism/devnet-sdk/shell/env" + "github.com/ethereum-optimism/optimism/devnet-sdk/system" + "github.com/ethereum-optimism/optimism/devnet-sdk/types" + "github.com/stretchr/testify/require" +) + +// mockTB implements a minimal testing.TB for testing +type mockTB struct { + testing.TB + name string +} + +func (m *mockTB) Helper() {} +func (m *mockTB) Name() string { return m.name } +func (m *mockTB) Cleanup(func()) {} +func (m *mockTB) Error(args ...any) {} +func (m *mockTB) Errorf(string, ...any) {} +func (m *mockTB) Fail() {} +func (m *mockTB) FailNow() {} +func (m *mockTB) Failed() bool { return false } +func (m *mockTB) Fatal(args ...any) {} +func (m *mockTB) Fatalf(string, ...any) {} +func (m *mockTB) Log(args ...any) {} +func (m *mockTB) Logf(string, ...any) {} +func (m *mockTB) Skip(args ...any) {} +func (m *mockTB) SkipNow() {} +func (m *mockTB) Skipf(string, ...any) {} +func (m *mockTB) Skipped() bool { return false } +func (m *mockTB) TempDir() string { return "" } +func (m *mockTB) Setenv(key, value string) {} + +// mockChain implements a minimal system.Chain for testing +type mockChain struct{} + +func (m *mockChain) RPCURL() string { return "http://localhost:8545" } +func (m *mockChain) ID() types.ChainID { return types.ChainID(1) } +func (m *mockChain) ContractsRegistry() interfaces.ContractsRegistry { return nil } +func (m *mockChain) Wallet(ctx context.Context, constraints ...constraints.WalletConstraint) (types.Wallet, error) { + return nil, nil +} + +// mockSystem implements a minimal system.System for testing +type mockSystem struct{} + +func (m *mockSystem) Identifier() string { return "mock" } +func (m *mockSystem) L1() system.Chain { return &mockChain{} } +func (m *mockSystem) L2(uint64) system.Chain { return &mockChain{} } +func (m *mockSystem) Close() error { return nil } + +// mockInteropSet implements a minimal system.InteropSet for testing +type mockInteropSet struct{} + +func (m *mockInteropSet) L2(uint64) 
system.Chain { return &mockChain{} } + +// mockInteropSystem implements a minimal system.InteropSystem for testing +type mockInteropSystem struct { + mockSystem +} + +func (m *mockInteropSystem) InteropSet() system.InteropSet { return &mockInteropSet{} } + +// newMockSystem creates a new mock system for testing +func newMockSystem() system.System { + return &mockSystem{} +} + +// newMockInteropSystem creates a new mock interop system for testing +func newMockInteropSystem() system.InteropSystem { + return &mockInteropSystem{} +} + +// testSystemCreator is a function that creates a system for testing +type testSystemCreator func() (system.System, error) + +// testPackage is a test-specific implementation of the package +type testPackage struct { + creator testSystemCreator +} + +func (p *testPackage) NewSystemFromEnv(string) (system.System, error) { + return p.creator() +} + +// withTestSystem runs a test with a custom system creator +func withTestSystem(t *testing.T, creator testSystemCreator, f func(t *testing.T)) { + // Save original env var + origEnvFile := os.Getenv(env.EnvFileVar) + defer os.Setenv(env.EnvFileVar, origEnvFile) + + // Set empty env var for testing + os.Setenv(env.EnvFileVar, "") + + // Create a test-specific package + pkg := &testPackage{creator: creator} + origPkg := currentPackage + currentPackage = pkg + defer func() { + currentPackage = origPkg + }() + + f(t) +} + +// TestNewT tests the creation and basic functionality of the test wrapper +func TestNewT(t *testing.T) { + t.Run("wraps *testing.T correctly", func(t *testing.T) { + wrapped := NewT(t) + require.NotNil(t, wrapped) + require.NotNil(t, wrapped.Context()) + }) + + t.Run("preserves existing T implementation", func(t *testing.T) { + original := NewT(t) + wrapped := NewT(original) + require.Equal(t, original, wrapped) + }) +} + +// TestTWrapper tests the tbWrapper functionality +func TestTWrapper(t *testing.T) { + t.Run("context operations", func(t *testing.T) { + wrapped := NewT(t) + 
key := &struct{}{} + ctx := context.WithValue(context.Background(), key, "value") + newWrapped := wrapped.WithContext(ctx) + + require.NotEqual(t, wrapped, newWrapped) + require.Equal(t, "value", newWrapped.Context().Value(key)) + }) + + t.Run("deadline", func(t *testing.T) { + mock := &mockTB{name: "mock"} + wrapped := NewT(mock) + deadline, ok := wrapped.Deadline() + require.False(t, ok, "deadline should not be set") + require.True(t, deadline.IsZero(), "deadline should be zero time") + }) + + t.Run("parallel execution", func(t *testing.T) { + wrapped := NewT(t) + // Should not panic + wrapped.Parallel() + }) + + t.Run("sub-tests", func(t *testing.T) { + wrapped := NewT(t) + subTestCalled := false + wrapped.Run("sub-test", func(t T) { + subTestCalled = true + require.NotNil(t, t) + require.NotNil(t, t.Context()) + }) + require.True(t, subTestCalled) + }) + + t.Run("nested sub-tests", func(t *testing.T) { + wrapped := NewT(t) + level1Called := false + level2Called := false + + wrapped.Run("level-1", func(t T) { + level1Called = true + t.Run("level-2", func(t T) { + level2Called = true + }) + }) + + require.True(t, level1Called) + require.True(t, level2Called) + }) +} + +// TestSystemTest tests the main SystemTest function +func TestSystemTest(t *testing.T) { + withTestSystem(t, func() (system.System, error) { + return newMockSystem(), nil + }, func(t *testing.T) { + t.Run("basic system test", func(t *testing.T) { + called := false + SystemTest(t, func(t T, sys system.System) { + called = true + require.NotNil(t, sys) + }) + require.True(t, called) + }) + + t.Run("with validator", func(t *testing.T) { + validatorCalled := false + testCalled := false + + validator := func(t T, sys system.System) (context.Context, error) { + validatorCalled = true + return t.Context(), nil + } + + SystemTest(t, func(t T, sys system.System) { + testCalled = true + }, validator) + + require.True(t, validatorCalled) + require.True(t, testCalled) + }) + + t.Run("multiple validators", 
func(t *testing.T) { + validatorCount := 0 + + validator := func(t T, sys system.System) (context.Context, error) { + validatorCount++ + return t.Context(), nil + } + + SystemTest(t, func(t T, sys system.System) {}, validator, validator, validator) + require.Equal(t, 3, validatorCount) + }) + }) +} + +// TestInteropSystemTest tests the InteropSystemTest function +func TestInteropSystemTest(t *testing.T) { + t.Run("skips non-interop system", func(t *testing.T) { + withTestSystem(t, func() (system.System, error) { + return newMockSystem(), nil + }, func(t *testing.T) { + called := false + InteropSystemTest(t, func(t T, sys system.InteropSystem) { + called = true + }) + require.False(t, called) + }) + }) + + t.Run("runs with interop system", func(t *testing.T) { + withTestSystem(t, func() (system.System, error) { + return newMockInteropSystem(), nil + }, func(t *testing.T) { + called := false + InteropSystemTest(t, func(t T, sys system.InteropSystem) { + called = true + require.NotNil(t, sys.InteropSet()) + }) + require.True(t, called) + }) + }) +} diff --git a/devnet-sdk/types/balance.go b/devnet-sdk/types/balance.go new file mode 100644 index 00000000000..3b328731da6 --- /dev/null +++ b/devnet-sdk/types/balance.go @@ -0,0 +1,75 @@ +package types + +import ( + "fmt" + "log/slog" + "math/big" +) + +type Balance struct { + *big.Int +} + +// NewBalance creates a new Balance from a big.Int +func NewBalance(i *big.Int) Balance { + return Balance{Int: new(big.Int).Set(i)} +} + +// Add returns a new Balance with other added to it +func (b Balance) Add(other Balance) Balance { + return Balance{Int: new(big.Int).Add(b.Int, other.Int)} +} + +// Sub returns a new Balance with other subtracted from it +func (b Balance) Sub(other Balance) Balance { + return Balance{Int: new(big.Int).Sub(b.Int, other.Int)} +} + +// Mul returns a new Balance multiplied by a float64 +func (b Balance) Mul(f float64) Balance { + floatResult := new(big.Float).Mul(new(big.Float).SetInt(b.Int), 
new(big.Float).SetFloat64(f)) + result := new(big.Int) + floatResult.Int(result) + return Balance{Int: result} +} + +// GreaterThan returns true if this balance is greater than other +func (b Balance) GreaterThan(other Balance) bool { + return b.Int.Cmp(other.Int) > 0 +} + +// LessThan returns true if this balance is less than other +func (b Balance) LessThan(other Balance) bool { + return b.Int.Cmp(other.Int) < 0 +} + +// Equal returns true if this balance equals other +func (b Balance) Equal(other Balance) bool { + return b.Int.Cmp(other.Int) == 0 +} + +// LogValue implements slog.LogValuer to format Balance in the most readable unit +func (b Balance) LogValue() slog.Value { + if b.Int == nil { + return slog.StringValue("0 ETH") + } + + val := new(big.Float).SetInt(b.Int) + eth := new(big.Float).Quo(val, new(big.Float).SetInt64(1e18)) + + // 1 ETH = 1e18 Wei + if eth.Cmp(new(big.Float).SetFloat64(0.001)) >= 0 { + str := eth.Text('g', 3) + return slog.StringValue(fmt.Sprintf("%s ETH", str)) + } + + // 1 Gwei = 1e9 Wei + gwei := new(big.Float).Quo(val, new(big.Float).SetInt64(1e9)) + if gwei.Cmp(new(big.Float).SetFloat64(0.001)) >= 0 { + str := gwei.Text('g', 3) + return slog.StringValue(fmt.Sprintf("%s Gwei", str)) + } + + // Wei + return slog.StringValue(fmt.Sprintf("%s Wei", b.Text(10))) +} diff --git a/devnet-sdk/types/balance_test.go b/devnet-sdk/types/balance_test.go new file mode 100644 index 00000000000..0a7a6c1b216 --- /dev/null +++ b/devnet-sdk/types/balance_test.go @@ -0,0 +1,147 @@ +package types + +import ( + "math/big" + "testing" +) + +func TestNewBalance(t *testing.T) { + i := big.NewInt(100) + b := NewBalance(i) + if b.Int.Cmp(i) != 0 { + t.Errorf("NewBalance failed, got %v, want %v", b.Int, i) + } + + // Verify that modifying the input doesn't affect the Balance + i.SetInt64(200) + if b.Int.Cmp(big.NewInt(100)) != 0 { + t.Error("NewBalance did not create a copy of the input") + } +} + +func TestBalance_Add(t *testing.T) { + tests := []struct { + 
a, b, want int64 + }{ + {100, 200, 300}, + {0, 100, 100}, + {-100, 100, 0}, + {1000000, 2000000, 3000000}, + } + + for _, tt := range tests { + a := NewBalance(big.NewInt(tt.a)) + b := NewBalance(big.NewInt(tt.b)) + got := a.Add(b) + want := NewBalance(big.NewInt(tt.want)) + if !got.Equal(want) { + t.Errorf("Add(%v, %v) = %v, want %v", tt.a, tt.b, got, want) + } + // Verify original balances weren't modified + if !a.Equal(NewBalance(big.NewInt(tt.a))) { + t.Error("Add modified original balance") + } + } +} + +func TestBalance_Sub(t *testing.T) { + tests := []struct { + a, b, want int64 + }{ + {300, 200, 100}, + {100, 100, 0}, + {0, 100, -100}, + {3000000, 2000000, 1000000}, + } + + for _, tt := range tests { + a := NewBalance(big.NewInt(tt.a)) + b := NewBalance(big.NewInt(tt.b)) + got := a.Sub(b) + want := NewBalance(big.NewInt(tt.want)) + if !got.Equal(want) { + t.Errorf("Sub(%v, %v) = %v, want %v", tt.a, tt.b, got, want) + } + } +} + +func TestBalance_Mul(t *testing.T) { + tests := []struct { + a int64 + mul float64 + want int64 + }{ + {100, 2.0, 200}, + {100, 0.5, 50}, + {100, 0.0, 0}, + {1000, 1.5, 1500}, + } + + for _, tt := range tests { + a := NewBalance(big.NewInt(tt.a)) + got := a.Mul(tt.mul) + want := NewBalance(big.NewInt(tt.want)) + if !got.Equal(want) { + t.Errorf("Mul(%v, %v) = %v, want %v", tt.a, tt.mul, got, want) + } + } +} + +func TestBalance_Comparisons(t *testing.T) { + tests := []struct { + a, b int64 + gt, lt, eq bool + }{ + {100, 200, false, true, false}, + {200, 100, true, false, false}, + {100, 100, false, false, true}, + {0, 100, false, true, false}, + } + + for _, tt := range tests { + a := NewBalance(big.NewInt(tt.a)) + b := NewBalance(big.NewInt(tt.b)) + + if got := a.GreaterThan(b); got != tt.gt { + t.Errorf("GreaterThan(%v, %v) = %v, want %v", tt.a, tt.b, got, tt.gt) + } + + if got := a.LessThan(b); got != tt.lt { + t.Errorf("LessThan(%v, %v) = %v, want %v", tt.a, tt.b, got, tt.lt) + } + + if got := a.Equal(b); got != tt.eq { + 
t.Errorf("Equal(%v, %v) = %v, want %v", tt.a, tt.b, got, tt.eq) + } + } +} + +func TestBalance_LogValue(t *testing.T) { + tests := []struct { + wei string // Using string to handle large numbers + want string + }{ + {"2000000000000000000", "2 ETH"}, // 2 ETH + {"1000000000", "1 Gwei"}, // 1 Gwei + {"100", "100 Wei"}, // 100 Wei + {"1500000000000000000", "1.5 ETH"}, // 1.5 ETH + {"0", "0 Wei"}, // 0 + } + + for _, tt := range tests { + i := new(big.Int) + i.SetString(tt.wei, 10) + b := NewBalance(i) + got := b.LogValue().String() + if got != tt.want { + t.Errorf("LogValue() for %v Wei = %v, want %v", tt.wei, got, tt.want) + } + } + + // Test nil case + var nilBalance Balance + got := nilBalance.LogValue().String() + if got != "0 ETH" { + t.Errorf("LogValue() for nil balance = %v, want '0 ETH'", got) + } +} diff --git a/devnet-sdk/types/types.go b/devnet-sdk/types/types.go new file mode 100644 index 00000000000..9a6123c9900 --- /dev/null +++ b/devnet-sdk/types/types.go @@ -0,0 +1,32 @@ +package types + +import ( + "context" +) + +type Address string + +type ChainID uint64 + +type ReadInvocation[T any] interface { + Call(ctx context.Context) (T, error) +} + +type WriteInvocation[T any] interface { + ReadInvocation[T] + Send(ctx context.Context) InvocationResult +} + +type InvocationResult interface { + Error() error + Wait() error +} + +type Wallet interface { + PrivateKey() Key + Address() Address + SendETH(to Address, amount Balance) WriteInvocation[any] + Balance() Balance +} + +type Key = string diff --git a/docker-bake.hcl b/docker-bake.hcl index 54af855b193..ea19a31131e 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -230,22 +230,6 @@ target "holocene-deployer" { tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/holocene-deployer:${tag}"] } -target "ci-builder" { - dockerfile = "./ops/docker/ci-builder/Dockerfile" - context = "." 
- platforms = split(",", PLATFORMS) - target="base-builder" - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ci-builder:${tag}"] -} - -target "ci-builder-rust" { - dockerfile = "./ops/docker/ci-builder/Dockerfile" - context = "." - platforms = split(",", PLATFORMS) - target="rust-builder" - tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/ci-builder-rust:${tag}"] -} - target "op-deployer" { dockerfile = "ops/docker/op-stack-go/Dockerfile" context = "." diff --git a/go.mod b/go.mod index 384140c4765..ad55be65ef5 100644 --- a/go.mod +++ b/go.mod @@ -10,12 +10,12 @@ require ( github.com/bmatcuk/doublestar/v4 v4.8.1 github.com/btcsuite/btcd v0.24.2 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 - github.com/cockroachdb/pebble v1.1.3 - github.com/consensys/gnark-crypto v0.14.0 + github.com/cockroachdb/pebble v1.1.4 + github.com/consensys/gnark-crypto v0.15.0 github.com/crate-crypto/go-kzg-4844 v1.1.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 - github.com/ethereum/go-ethereum v1.14.12 + github.com/ethereum/go-ethereum v1.15.0 github.com/fatih/color v1.18.0 github.com/fsnotify/fsnotify v1.8.0 github.com/go-task/slim-sprig/v3 v3.0.0 @@ -35,9 +35,10 @@ require ( github.com/libp2p/go-libp2p-mplex v0.9.0 github.com/libp2p/go-libp2p-pubsub v0.12.0 github.com/libp2p/go-libp2p-testing v0.12.0 - github.com/lmittmann/w3 v0.17.4 + github.com/lmittmann/w3 v0.17.5 github.com/mattn/go-isatty v0.0.20 - github.com/minio/minio-go/v7 v7.0.81 + github.com/minio/minio-go/v7 v7.0.84 + github.com/minio/sha256-simd v1.0.1 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.14.0 github.com/multiformats/go-multiaddr-dns v0.4.1 @@ -46,6 +47,7 @@ require ( github.com/pkg/profile v1.7.0 github.com/prometheus/client_golang v1.20.5 github.com/protolambda/ctxlock v0.1.0 + github.com/schollz/progressbar/v3 v3.18.0 github.com/stretchr/testify v1.10.0 
github.com/urfave/cli/v2 v2.27.5 golang.org/x/crypto v0.32.0 @@ -53,7 +55,6 @@ require ( golang.org/x/sync v0.10.0 golang.org/x/term v0.28.0 golang.org/x/time v0.9.0 - gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -67,7 +68,7 @@ require ( github.com/armon/go-metrics v0.4.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.17.0 // indirect + github.com/bits-and-blooms/bitset v1.20.0 // indirect github.com/boltdb/bolt v1.3.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/btcsuite/btcd/btcutil v1.1.5 // indirect @@ -78,7 +79,7 @@ require ( github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/consensys/bavard v0.1.22 // indirect + github.com/consensys/bavard v0.1.27 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect @@ -108,7 +109,7 @@ require ( github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-yaml/yaml v2.1.0+incompatible // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -139,7 +140,7 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect @@ -159,13 +160,13 @@ require ( 
github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mholt/archiver v3.1.1+incompatible // indirect github.com/miekg/dns v1.1.62 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure v1.2.1 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect @@ -200,7 +201,9 @@ require ( github.com/pion/sdp/v3 v3.0.9 // indirect github.com/pion/srtp/v2 v2.0.20 // indirect github.com/pion/stun v0.6.1 // indirect + github.com/pion/stun/v2 v2.0.0 // indirect github.com/pion/transport/v2 v2.2.10 // indirect + github.com/pion/transport/v3 v3.0.7 // indirect github.com/pion/turn/v2 v2.1.6 // indirect github.com/pion/webrtc/v3 v3.3.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -247,11 +250,12 @@ require ( google.golang.org/grpc v1.57.1 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect lukechampine.com/blake3 v1.3.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) -replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101412.1-rc.1 +replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101500.0-rc.1 //replace github.com/ethereum/go-ethereum => ../op-geth diff --git a/go.sum b/go.sum index 309beb8c4d0..3133044f613 100644 --- a/go.sum +++ b/go.sum @@ -49,8 +49,8 @@ 
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.17.0 h1:1X2TS7aHz1ELcC0yU1y2stUs/0ig5oMU6STFZGrhvHI= -github.com/bits-and-blooms/bitset v1.17.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU= +github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= @@ -92,6 +92,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chengxilo/virtualterm v1.0.4 h1:Z6IpERbRVlfB8WkOmtbHiDbBANU7cimRIof7mk9/PwM= +github.com/chengxilo/virtualterm v1.0.4/go.mod h1:DyxxBZz/x1iqJjFxTFcr6/x+jSpqN0iwWCOK1q10rlY= github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= @@ -116,16 +118,16 @@ github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/e github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod 
h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v1.1.3 h1:GM5YY3Yb09KCGUQoyWdi3vsLErXHsmc3qRRWsX+tBqw= -github.com/cockroachdb/pebble v1.1.3/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= +github.com/cockroachdb/pebble v1.1.4 h1:5II1uEP4MyHLDnsrbv/EZ36arcb9Mxg3n+owhZ3GrG8= +github.com/cockroachdb/pebble v1.1.4/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= -github.com/consensys/bavard v0.1.22 h1:Uw2CGvbXSZWhqK59X0VG/zOjpTFuOMcPLStrp1ihI0A= -github.com/consensys/bavard v0.1.22/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= -github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E= -github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0= +github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs= +github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= +github.com/consensys/gnark-crypto v0.15.0 h1:OXsWnhheHV59eXIzhL5OIexa/vqTK8wtRYQCtwfMDtY= +github.com/consensys/gnark-crypto v0.15.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= 
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -190,8 +192,8 @@ github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/u github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs= -github.com/ethereum-optimism/op-geth v1.101412.1-rc.1 h1:xXsoOKpgChoUfrzml9QTKdFTDnRAGMsZoQ/FyihJAh0= -github.com/ethereum-optimism/op-geth v1.101412.1-rc.1/go.mod h1:vgZU+rg5NYY/Dfs7oqjWxTBSrHZkHaSPd/HiZWNcw4o= +github.com/ethereum-optimism/op-geth v1.101500.0-rc.1 h1:MiTFqCTU/Zi6Apyu5ODFxfJomSUJ1oCbdLUbGm4sWpw= +github.com/ethereum-optimism/op-geth v1.101500.0-rc.1/go.mod h1:OMpyVMMy5zpAAHlR5s/aGbXRk+7cIKczUEIJj54APbY= github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8= @@ -255,8 +257,8 @@ github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaL github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -432,8 +434,8 @@ github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IX github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= @@ -495,8 +497,8 @@ github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQsc github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= -github.com/lmittmann/w3 v0.17.4 h1:nHaX3EymuMoup5eQ/qpU4YVR67boSDGewGQt90DgJng= -github.com/lmittmann/w3 v0.17.4/go.mod h1:+NTGtk54BK1W7572qRaiZ9ywVSaKeObg490bUVvKjAs= +github.com/lmittmann/w3 v0.17.5 h1:5BHLlts5NjZwOAxF0ZU/BiVvJ4+gKBmQOjTS/VDq25g= +github.com/lmittmann/w3 v0.17.5/go.mod h1:pZGfIFPneOcrpOhcKhtnYKlldwj2jUiTJ2uN4Aq6qg0= github.com/lunixbochs/vtclean v1.0.0/go.mod 
h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -521,8 +523,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mholt/archiver v3.1.1+incompatible h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU= github.com/mholt/archiver v3.1.1+incompatible/go.mod h1:Dh2dOXnSdiLxRiPoVfIr/fI1TwETms9B8CTWfeh7ROU= @@ -538,11 +540,13 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4S github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.81 h1:SzhMN0TQ6T/xSBu6Nvw3M5M8voM+Ht8RH3hE8S7zxaA= -github.com/minio/minio-go/v7 v7.0.81/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= +github.com/minio/minio-go/v7 v7.0.84 h1:D1HVmAF8JF8Bpi6IU4V9vIEj+8pc+xU88EWMs2yed0E= 
+github.com/minio/minio-go/v7 v7.0.84/go.mod h1:57YXpvc5l3rjPdhqNrDsvVlY0qPI6UTk1bflAe+9doY= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -663,6 +667,8 @@ github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0= +github.com/pion/stun/v2 v2.0.0/go.mod h1:22qRSh08fSEttYUmJZGlriq9+03jtVmXNODgLccj8GQ= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= @@ -745,6 +751,8 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= 
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= +github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= diff --git a/kurtosis-devnet/README.md b/kurtosis-devnet/README.md index 23bd5d0e3e0..d0efc34b2ab 100644 --- a/kurtosis-devnet/README.md +++ b/kurtosis-devnet/README.md @@ -4,8 +4,8 @@ Running a Kurtosis Devnet has the following prerequisites: - Kurtosis must be installed - Docker Desktop must be installed and running -Platform specific installation instructions for Kurtosis may be found [in Kurtosis documentation](https://docs.kurtosis.com/install/), -but for Mac users, the following command should suffice: +Platform specific installation instructions for Kurtosis may be found [in Kurtosis documentation](https://docs.kurtosis.com/install/), alternatively Mac, Windows and Linux binaries can be found [here](https://github.com/kurtosis-tech/kurtosis-cli-release-artifacts). 
+For Mac users, the following command should suffice: ``` brew install kurtosis-tech/tap/kurtosis-cli ``` @@ -16,7 +16,6 @@ Docker Desktop may be substituted by an alternative like Orbstack if you have th # Running A Devnet To see available devnets, consult the `justfile` to see what `.*-devnet` targets exist, currently -- `mini-devnet` - `simple-devnet` - `interop-devnet` - `user-devnet` diff --git a/kurtosis-devnet/justfile b/kurtosis-devnet/justfile index cbdc1b53582..a47238c4594 100644 --- a/kurtosis-devnet/justfile +++ b/kurtosis-devnet/justfile @@ -73,9 +73,6 @@ devnet-test DEVNET *TEST: # Devnet recipes -# Mini devnet -mini-devnet: (devnet "mini.yaml") - # Simple devnet simple-devnet: (devnet "simple.yaml") diff --git a/kurtosis-devnet/mini.yaml b/kurtosis-devnet/mini.yaml deleted file mode 100644 index 51919822dd7..00000000000 --- a/kurtosis-devnet/mini.yaml +++ /dev/null @@ -1,68 +0,0 @@ -optimism_package: - chains: - - participants: - - el_type: op-geth - el_image: "" - el_log_level: "" - el_extra_env_vars: {} - el_extra_labels: {} - el_extra_params: [] - el_tolerations: [] - el_volume_size: 0 - el_min_cpu: 0 - el_max_cpu: 0 - el_min_mem: 0 - el_max_mem: 0 - cl_type: op-node - cl_image: "" - cl_log_level: "" - cl_extra_env_vars: {} - cl_extra_labels: {} - cl_extra_params: [] - cl_tolerations: [] - cl_volume_size: 0 - cl_min_cpu: 0 - cl_max_cpu: 0 - cl_min_mem: 0 - cl_max_mem: 0 - node_selectors: {} - tolerations: [] - count: 1 - network_params: - network: "kurtosis" - network_id: "2151908" - seconds_per_slot: 2 - name: "op-kurtosis" - fjord_time_offset: 0 - granite_time_offset: 0 - holocene_time_offset: 0 - fund_dev_accounts: true - batcher_params: - image: "" - extra_params: [] - mev_params: - rollup_boost_image: "" - builder_host: "" - builder_port: "" - additional_services: [] - op_contract_deployer_params: - image: opsigma/op-deployer:v0.0.7-http - l1_artifacts_locator: 
https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-9af7366a7102f51e8dbe451dcfa22971131d89e218915c91f420a164cc48be65.tar.gz - l2_artifacts_locator: https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-9af7366a7102f51e8dbe451dcfa22971131d89e218915c91f420a164cc48be65.tar.gz - global_log_level: "info" - global_node_selectors: {} - global_tolerations: [] - persistent: false -ethereum_package: - network_params: - preset: minimal - genesis_delay: 5 - additional_preloaded_contracts: | - { - "0x4e59b44847b379578588920cA78FbF26c0B4956C": { - "balance": "0ETH", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "storage": {}, - "nonce": "1" - } - } diff --git a/kurtosis-devnet/pkg/deploy/prestate.go b/kurtosis-devnet/pkg/deploy/prestate.go index c0a080d3bae..1b06c76ada5 100644 --- a/kurtosis-devnet/pkg/deploy/prestate.go +++ b/kurtosis-devnet/pkg/deploy/prestate.go @@ -53,7 +53,6 @@ func (h *localPrestateHolder) GetPrestateInfo() (*PrestateInfo, error) { fileToKey := map[string]string{ "prestate-proof.json": "prestate", "prestate-proof-mt64.json": "prestate_mt64", - "prestate-proof-mt.json": "prestate_mt", "prestate-proof-interop.json": "prestate_interop", } diff --git a/kurtosis-devnet/pkg/deploy/prestate_test.go b/kurtosis-devnet/pkg/deploy/prestate_test.go index aa825bd720e..ec485d902be 100644 --- a/kurtosis-devnet/pkg/deploy/prestate_test.go +++ b/kurtosis-devnet/pkg/deploy/prestate_test.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/tmpl" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) func TestLocalPrestate(t *testing.T) { diff --git a/kurtosis-devnet/tests/interop/boilerplate_test.go b/kurtosis-devnet/tests/interop/boilerplate_test.go new file mode 100644 index 00000000000..fd9114f6799 --- /dev/null +++ 
b/kurtosis-devnet/tests/interop/boilerplate_test.go @@ -0,0 +1,28 @@ +package interop + +import ( + "context" + "fmt" + "log/slog" + "os" + + "github.com/ethereum-optimism/optimism/devnet-sdk/constraints" + "github.com/ethereum-optimism/optimism/devnet-sdk/system" + "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" + "github.com/ethereum-optimism/optimism/devnet-sdk/types" +) + +func init() { + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))) +} + +func walletFundsValidator(chainIdx uint64, minFunds types.Balance, userMarker interface{}) systest.PreconditionValidator { + return func(t systest.T, sys system.System) (context.Context, error) { + chain := sys.L2(chainIdx) + user, err := chain.Wallet(t.Context(), constraints.WithBalance(minFunds)) + if err != nil { + return nil, fmt.Errorf("No available wallet with funds: %w", err) + } + return context.WithValue(t.Context(), userMarker, user), nil + } +} diff --git a/kurtosis-devnet/tests/interop/interop_smoke_test.go b/kurtosis-devnet/tests/interop/interop_smoke_test.go new file mode 100644 index 00000000000..2129ae51c34 --- /dev/null +++ b/kurtosis-devnet/tests/interop/interop_smoke_test.go @@ -0,0 +1,91 @@ +package interop + +import ( + "context" + "log/slog" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" + "github.com/ethereum-optimism/optimism/devnet-sdk/system" + "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" + "github.com/ethereum-optimism/optimism/devnet-sdk/types" + "github.com/stretchr/testify/require" +) + +func smokeTestScenario(chainIdx uint64, userSentinel interface{}) systest.SystemTestFunc { + return func(t systest.T, sys system.System) { + ctx := t.Context() + logger := slog.With("test", "TestMinimal", "devnet", sys.Identifier()) + + chain := sys.L2(chainIdx) + logger = logger.With("chain", chain.ID()) + logger.InfoContext(ctx, "starting test") + + funds := 
types.NewBalance(big.NewInt(0.5 * constants.ETH)) + user := ctx.Value(userSentinel).(types.Wallet) + + scw0Addr := constants.SuperchainWETH + scw0, err := chain.ContractsRegistry().SuperchainWETH(scw0Addr) + require.NoError(t, err) + logger.InfoContext(ctx, "using SuperchainWETH", "contract", scw0Addr) + + initialBalance, err := scw0.BalanceOf(user.Address()).Call(ctx) + require.NoError(t, err) + logger = logger.With("user", user.Address()) + logger.InfoContext(ctx, "initial balance retrieved", "balance", initialBalance) + + logger.InfoContext(ctx, "sending ETH to contract", "amount", funds) + require.NoError(t, user.SendETH(scw0Addr, funds).Send(ctx).Wait()) + + balance, err := scw0.BalanceOf(user.Address()).Call(ctx) + require.NoError(t, err) + logger.InfoContext(ctx, "final balance retrieved", "balance", balance) + + require.Equal(t, balance, initialBalance.Add(funds)) + } +} + +func TestSystemWrapETH(t *testing.T) { + chainIdx := uint64(0) // We'll use the first L2 chain for this test + testUserMarker := &struct{}{} // Sentinel for the user context value + + systest.SystemTest(t, + smokeTestScenario(chainIdx, testUserMarker), + walletFundsValidator(chainIdx, types.NewBalance(big.NewInt(1.0*constants.ETH)), testUserMarker), + ) +} + +func TestInteropSystemNoop(t *testing.T) { + systest.InteropSystemTest(t, func(t systest.T, sys system.InteropSystem) { + slog.Info("noop") + }) +} + +func TestSmokeTestFailure(t *testing.T) { + // Create mock failing system + mockAddr := types.Address("0x1234567890123456789012345678901234567890") + mockWallet := &mockFailingWallet{ + addr: mockAddr, + key: "mock-key", + bal: types.NewBalance(big.NewInt(1000000)), + } + mockChain := &mockFailingChain{ + id: types.ChainID(1234), + wallet: mockWallet, + reg: &mockRegistry{}, + } + mockSys := &mockFailingSystem{chain: mockChain} + + // Run the smoke test logic and capture failures + sentinel := &struct{}{} + rt := NewRecordingT(context.WithValue(context.TODO(), sentinel, mockWallet)) + 
rt.TestScenario( + smokeTestScenario(0, sentinel), + mockSys, + ) + + // Verify that the test failed due to SendETH error + require.True(t, rt.Failed(), "test should have failed") + require.Contains(t, rt.Logs(), "transaction failure", "unexpected failure message") +} diff --git a/kurtosis-devnet/tests/interop/mocks_test.go b/kurtosis-devnet/tests/interop/mocks_test.go new file mode 100644 index 00000000000..47c607bf516 --- /dev/null +++ b/kurtosis-devnet/tests/interop/mocks_test.go @@ -0,0 +1,258 @@ +package interop + +import ( + "bytes" + "context" + "fmt" + "math/big" + "os" + "runtime" + "time" + + "github.com/ethereum-optimism/optimism/devnet-sdk/constraints" + "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" + "github.com/ethereum-optimism/optimism/devnet-sdk/system" + "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" + "github.com/ethereum-optimism/optimism/devnet-sdk/types" +) + +// mockFailingTx implements types.WriteInvocation[any] that always fails +type mockFailingTx struct{} + +func (m *mockFailingTx) Call(ctx context.Context) (any, error) { + return nil, fmt.Errorf("simulated transaction failure") +} + +func (m *mockFailingTx) Send(ctx context.Context) types.InvocationResult { + return m +} + +func (m *mockFailingTx) Error() error { + return fmt.Errorf("transaction failure") +} + +func (m *mockFailingTx) Wait() error { + return fmt.Errorf("transaction failure") +} + +// mockFailingWallet implements types.Wallet that fails on SendETH +type mockFailingWallet struct { + addr types.Address + key types.Key + bal types.Balance +} + +func (m *mockFailingWallet) Address() types.Address { + return m.addr +} + +func (m *mockFailingWallet) PrivateKey() types.Key { + return m.key +} + +func (m *mockFailingWallet) Balance() types.Balance { + return m.bal +} + +func (m *mockFailingWallet) SendETH(to types.Address, amount types.Balance) types.WriteInvocation[any] { + return &mockFailingTx{} +} + +// mockFailingChain implements 
system.Chain with a failing SendETH +type mockFailingChain struct { + id types.ChainID + wallet types.Wallet + reg interfaces.ContractsRegistry +} + +func (m *mockFailingChain) RPCURL() string { return "mock://failing" } +func (m *mockFailingChain) ID() types.ChainID { return m.id } +func (m *mockFailingChain) Wallet(ctx context.Context, constraints ...constraints.WalletConstraint) (types.Wallet, error) { + return m.wallet, nil +} +func (m *mockFailingChain) ContractsRegistry() interfaces.ContractsRegistry { return m.reg } + +// mockFailingSystem implements system.System with a failing chain +type mockFailingSystem struct { + chain *mockFailingChain +} + +func (m *mockFailingSystem) Identifier() string { return "mock-failing" } +func (m *mockFailingSystem) L1() system.Chain { return m.chain } +func (m *mockFailingSystem) L2(uint64) system.Chain { return m.chain } + +// recordingT implements systest.T and records failures +type RecordingT struct { + failed bool + skipped bool + logs *bytes.Buffer + cleanup []func() + ctx context.Context +} + +func NewRecordingT(ctx context.Context) *RecordingT { + return &RecordingT{ + logs: bytes.NewBuffer(nil), + ctx: ctx, + } +} + +var _ systest.T = (*RecordingT)(nil) + +func (r *RecordingT) Context() context.Context { + return r.ctx +} + +func (r *RecordingT) WithContext(ctx context.Context) systest.T { + return &RecordingT{ + failed: r.failed, + skipped: r.skipped, + logs: r.logs, + cleanup: r.cleanup, + ctx: ctx, + } +} + +func (r *RecordingT) Deadline() (deadline time.Time, ok bool) { + // TODO + return time.Time{}, false +} + +func (r *RecordingT) Parallel() { + // TODO +} + +func (r *RecordingT) Run(name string, f func(systest.T)) { + // TODO +} + +func (r *RecordingT) Cleanup(f func()) { + r.cleanup = append(r.cleanup, f) +} + +func (r *RecordingT) Error(args ...interface{}) { + r.Log(args...) + r.Fail() +} + +func (r *RecordingT) Errorf(format string, args ...interface{}) { + r.Logf(format, args...) 
+ r.Fail() +} + +func (r *RecordingT) Fatal(args ...interface{}) { + r.Log(args...) + r.FailNow() +} + +func (r *RecordingT) Fatalf(format string, args ...interface{}) { + r.Logf(format, args...) + r.FailNow() +} + +func (r *RecordingT) FailNow() { + r.Fail() + runtime.Goexit() +} + +func (r *RecordingT) Fail() { + r.failed = true +} + +func (r *RecordingT) Failed() bool { + return r.failed +} + +func (r *RecordingT) Helper() { + // TODO +} + +func (r *RecordingT) Log(args ...interface{}) { + fmt.Fprintln(r.logs, args...) +} + +func (r *RecordingT) Logf(format string, args ...interface{}) { + fmt.Fprintf(r.logs, format, args...) + fmt.Fprintln(r.logs) +} + +func (r *RecordingT) Name() string { + return "RecordingT" // TODO +} + +func (r *RecordingT) Setenv(key, value string) { + // Store original value + origValue, exists := os.LookupEnv(key) + + // Set new value + os.Setenv(key, value) + + // Register cleanup to restore original value + r.Cleanup(func() { + if exists { + os.Setenv(key, origValue) + } else { + os.Unsetenv(key) + } + }) + +} + +func (r *RecordingT) Skip(args ...interface{}) { + r.Log(args...) + r.SkipNow() +} + +func (r *RecordingT) SkipNow() { + r.skipped = true +} + +func (r *RecordingT) Skipf(format string, args ...interface{}) { + r.Logf(format, args...) 
+ r.skipped = true +} + +func (r *RecordingT) Skipped() bool { + return r.skipped +} + +func (r *RecordingT) TempDir() string { + return "" // TODO +} + +func (r *RecordingT) Logs() string { + return r.logs.String() +} + +func (r *RecordingT) TestScenario(scenario systest.SystemTestFunc, sys system.System, values ...interface{}) { + // run in a separate goroutine so we can handle runtime.Goexit() + done := make(chan struct{}) + go func() { + defer close(done) + scenario(r, sys) + }() + <-done +} + +// mockBalance implements types.ReadInvocation[types.Balance] +type mockBalance struct { + bal types.Balance +} + +func (m *mockBalance) Call(ctx context.Context) (types.Balance, error) { + return m.bal, nil +} + +// mockWETH implements interfaces.SuperchainWETH +type mockWETH struct{} + +func (m *mockWETH) BalanceOf(addr types.Address) types.ReadInvocation[types.Balance] { + return &mockBalance{bal: types.NewBalance(big.NewInt(0))} +} + +// mockRegistry implements interfaces.ContractsRegistry +type mockRegistry struct{} + +func (m *mockRegistry) SuperchainWETH(addr types.Address) (interfaces.SuperchainWETH, error) { + return &mockWETH{}, nil +} diff --git a/mise.toml b/mise.toml index 68146a8ad2d..4b15d663db9 100644 --- a/mise.toml +++ b/mise.toml @@ -12,7 +12,6 @@ direnv = "2.35.0" just = "1.37.0" # Cargo dependencies -"cargo:just" = "1.37.0" "cargo:svm-rs" = "0.5.8" # Go dependencies @@ -33,6 +32,10 @@ forge = "nightly-5d16800a64e5357fbb2493e4cae061756d145981" cast = "nightly-5d16800a64e5357fbb2493e4cae061756d145981" anvil = "nightly-5d16800a64e5357fbb2493e4cae061756d145981" +# Other dependencies +codecov-uploader = "0.8.0" +goreleaser-pro = "2.3.2-pro" + # Fake dependencies # Put things here if you need to track versions of tools or projects that can't # actually be managed by mise (yet). 
Make sure that anything you put in here is @@ -45,6 +48,9 @@ binary_signer = "1.0.4" forge = "ubi:foundry-rs/foundry[exe=forge]" cast = "ubi:foundry-rs/foundry[exe=cast]" anvil = "ubi:foundry-rs/foundry[exe=anvil]" +just = "ubi:casey/just" +codecov-uploader = "ubi:codecov/uploader" +goreleaser-pro = "ubi:goreleaser/goreleaser-pro[exe=goreleaser]" [settings] experimental = true diff --git a/op-alt-da/damgr_test.go b/op-alt-da/damgr_test.go index cba8f3de9b4..b487fc85c98 100644 --- a/op-alt-da/damgr_test.go +++ b/op-alt-da/damgr_test.go @@ -58,7 +58,7 @@ func TestFinalization(t *testing.T) { state.Prune(bID(7)) require.Equal(t, eth.L1BlockRef{}, state.lastPrunedCommitment) state.Prune(bID(8)) - require.Equal(t, eth.L1BlockRef{}, state.lastPrunedCommitment) + require.Equal(t, l1Ref(bn1), state.lastPrunedCommitment) // Track a commitment, challenge it, & then resolve it c2 := RandomCommitment(rng) @@ -84,11 +84,11 @@ func TestFinalization(t *testing.T) { // Now finalize everything state.Prune(bID(20)) - require.Equal(t, eth.L1BlockRef{}, state.lastPrunedCommitment) + require.Equal(t, l1Ref(bn1), state.lastPrunedCommitment) state.Prune(bID(28)) - require.Equal(t, eth.L1BlockRef{}, state.lastPrunedCommitment) + require.Equal(t, l1Ref(bn1), state.lastPrunedCommitment) state.Prune(bID(32)) - require.Equal(t, eth.L1BlockRef{Number: bn2}, state.lastPrunedCommitment) + require.Equal(t, l1Ref(bn2), state.lastPrunedCommitment) } // TestExpireChallenges expires challenges and prunes the state for longer windows diff --git a/op-batcher/batcher/channel_config.go b/op-batcher/batcher/channel_config.go index e62ea26eee4..bf0f5ffb4ad 100644 --- a/op-batcher/batcher/channel_config.go +++ b/op-batcher/batcher/channel_config.go @@ -53,7 +53,7 @@ type ChannelConfig struct { // ChannelConfig returns a copy of the receiver. // This allows the receiver to be a static ChannelConfigProvider of itself. 
-func (cc ChannelConfig) ChannelConfig() ChannelConfig { +func (cc ChannelConfig) ChannelConfig(isPectra bool) ChannelConfig { return cc } diff --git a/op-batcher/batcher/channel_config_provider.go b/op-batcher/batcher/channel_config_provider.go index 6cf5b0db686..4ce6f0c9e5e 100644 --- a/op-batcher/batcher/channel_config_provider.go +++ b/op-batcher/batcher/channel_config_provider.go @@ -10,11 +10,9 @@ import ( "github.com/ethereum/go-ethereum/params" ) -const randomByteCalldataGas = params.TxDataNonZeroGasEIP2028 - type ( ChannelConfigProvider interface { - ChannelConfig() ChannelConfig + ChannelConfig(isPectra bool) ChannelConfig } GasPricer interface { @@ -52,7 +50,7 @@ func NewDynamicEthChannelConfig(lgr log.Logger, // calldata and for blobs, given current market conditions: it will return // the appropriate ChannelConfig depending on which is cheaper. It makes // assumptions about the typical makeup of channel data. -func (dec *DynamicEthChannelConfig) ChannelConfig() ChannelConfig { +func (dec *DynamicEthChannelConfig) ChannelConfig(isPectra bool) ChannelConfig { ctx, cancel := context.WithTimeout(context.Background(), dec.timeout) defer cancel() tipCap, baseFee, blobBaseFee, err := dec.gasPricer.SuggestGasPriceCaps(ctx) @@ -61,36 +59,36 @@ func (dec *DynamicEthChannelConfig) ChannelConfig() ChannelConfig { return *dec.lastConfig } - // We estimate the gas costs of a calldata and blob tx under the assumption that we'd fill - // a frame fully and compressed random channel data has few zeros, so they can be - // ignored in the calldata gas price estimation. - // It is also assumed that a calldata tx would contain exactly one full frame - // and a blob tx would contain target-num-frames many blobs. + // Channels built for blobs have higher capacity than channels built for calldata. + // If we have a channel built for calldata, we want to switch to blobs if the cost per byte is lower. 
Doing so + // will mean a new channel is built which will not be full but will eventually fill up with additional data. + // If we have a channel built for blobs, we similarly want to switch to calldata if the cost per byte is lower. Doing so + // will mean several new (full) channels will be built resulting in several calldata txs. We compute the cost per byte + // for a _single_ transaction in either case. - // It would be nicer to use core.IntrinsicGas, but we don't have the actual data at hand - calldataBytes := dec.calldataConfig.MaxFrameSize + 1 // + 1 version byte - calldataGas := big.NewInt(int64(calldataBytes*randomByteCalldataGas + params.TxGas)) - calldataPrice := new(big.Int).Add(baseFee, tipCap) - calldataCost := new(big.Int).Mul(calldataGas, calldataPrice) + // We assume that compressed random channel data has few zeros so they can be ignored (in actuality, + // zero bytes are worth one token instead of four): + calldataBytesPerTx := dec.calldataConfig.MaxFrameSize + 1 // +1 for the version byte + tokensPerCalldataTx := uint64(calldataBytesPerTx * 4) + numBlobsPerTx := dec.blobConfig.TargetNumFrames - blobGas := big.NewInt(params.BlobTxBlobGasPerBlob * int64(dec.blobConfig.TargetNumFrames)) - blobCost := new(big.Int).Mul(blobGas, blobBaseFee) - // blobs still have intrinsic calldata costs - blobCalldataCost := new(big.Int).Mul(big.NewInt(int64(params.TxGas)), calldataPrice) - blobCost = blobCost.Add(blobCost, blobCalldataCost) + // Compute the total absolute cost of submitting either a single calldata tx or a single blob tx. 
+ calldataCost, blobCost := computeSingleCalldataTxCost(tokensPerCalldataTx, baseFee, tipCap, isPectra), + computeSingleBlobTxCost(numBlobsPerTx, baseFee, tipCap, blobBaseFee) + + // Now we compare the absolute cost per tx divided by the number of bytes per tx: + blobDataBytesPerTx := big.NewInt(eth.MaxBlobDataSize * int64(numBlobsPerTx)) - // Now we compare the prices divided by the number of bytes that can be - // submitted for that price. - blobDataBytes := big.NewInt(eth.MaxBlobDataSize * int64(dec.blobConfig.TargetNumFrames)) // The following will compare blobCost(a)/blobDataBytes(x) > calldataCost(b)/calldataBytes(y): - ay := new(big.Int).Mul(blobCost, big.NewInt(int64(calldataBytes))) - bx := new(big.Int).Mul(calldataCost, blobDataBytes) + ay := new(big.Int).Mul(blobCost, big.NewInt(int64(calldataBytesPerTx))) + bx := new(big.Int).Mul(calldataCost, blobDataBytesPerTx) + // ratio only used for logging, more correct multiplicative calculation used for comparison ayf, bxf := new(big.Float).SetInt(ay), new(big.Float).SetInt(bx) costRatio := new(big.Float).Quo(ayf, bxf) lgr := dec.log.New("base_fee", baseFee, "blob_base_fee", blobBaseFee, "tip_cap", tipCap, - "calldata_bytes", calldataBytes, "calldata_cost", calldataCost, - "blob_data_bytes", blobDataBytes, "blob_cost", blobCost, + "calldata_bytes", calldataBytesPerTx, "calldata_cost", calldataCost, + "blob_data_bytes", blobDataBytesPerTx, "blob_cost", blobCost, "cost_ratio", costRatio) if ay.Cmp(bx) == 1 { @@ -102,3 +100,34 @@ func (dec *DynamicEthChannelConfig) ChannelConfig() ChannelConfig { dec.lastConfig = &dec.blobConfig return dec.blobConfig } + +func computeSingleCalldataTxCost(numTokens uint64, baseFee, tipCap *big.Int, isPectra bool) *big.Int { + // We assume isContractCreation = false and execution_gas_used = 0 in https://eips.ethereum.org/EIPS/eip-7623 + // This is a safe assumption given how batcher transactions are constructed. 
+ const ( + standardTokenCost = 4 + totalCostFloorPerToken = 10 + ) + var multiplier uint64 + if isPectra { + multiplier = totalCostFloorPerToken + } else { + multiplier = standardTokenCost + } + + calldataPrice := new(big.Int).Add(baseFee, tipCap) + calldataGas := big.NewInt(int64(params.TxGas + numTokens*multiplier)) + + return new(big.Int).Mul(calldataGas, calldataPrice) +} + +func computeSingleBlobTxCost(numBlobs int, baseFee, tipCap, blobBaseFee *big.Int) *big.Int { + // There is no execution gas or contract creation cost for blob transactions + calldataPrice := new(big.Int).Add(baseFee, tipCap) + blobCalldataCost := new(big.Int).Mul(big.NewInt(int64(params.TxGas)), calldataPrice) + + blobGas := big.NewInt(params.BlobTxBlobGasPerBlob * int64(numBlobs)) + blobCost := new(big.Int).Mul(blobGas, blobBaseFee) + + return blobCost.Add(blobCost, blobCalldataCost) +} diff --git a/op-batcher/batcher/channel_config_provider_test.go b/op-batcher/batcher/channel_config_provider_test.go index 169d122e210..95e51a921e5 100644 --- a/op-batcher/batcher/channel_config_provider_test.go +++ b/op-batcher/batcher/channel_config_provider_test.go @@ -44,6 +44,7 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { baseFee int64 blobBaseFee int64 wantCalldata bool + isL1Pectra bool }{ { name: "much-cheaper-blobs", @@ -71,6 +72,36 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { blobBaseFee: 1e9, wantCalldata: true, }, + { + name: "much-cheaper-blobs-l1-pectra", + tipCap: 1e3, + baseFee: 1e6, + blobBaseFee: 1, + isL1Pectra: true, + }, + { + name: "close-cheaper-blobs-l1-pectra", + tipCap: 1e3, + baseFee: 1e6, + blobBaseFee: 398e5, // this value just under the equilibrium point for 3 blobs + isL1Pectra: true, + }, + { + name: "close-cheaper-calldata-l1-pectra", + tipCap: 1e3, + baseFee: 1e6, + blobBaseFee: 399e5, // this value just over the equilibrium point for 3 blobs + wantCalldata: true, + isL1Pectra: true, + }, + { + name: 
"much-cheaper-calldata-l1-pectra", + tipCap: 1e3, + baseFee: 1e6, + blobBaseFee: 1e9, + wantCalldata: true, + isL1Pectra: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -81,7 +112,7 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { blobBaseFee: tt.blobBaseFee, } dec := NewDynamicEthChannelConfig(lgr, 1*time.Second, gp, blobCfg, calldataCfg) - cc := dec.ChannelConfig() + cc := dec.ChannelConfig(tt.isL1Pectra) if tt.wantCalldata { require.Equal(t, cc, calldataCfg) require.NotNil(t, ch.FindLog(testlog.NewMessageContainsFilter("calldata"))) @@ -103,24 +134,42 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { err: errors.New("gp-error"), } dec := NewDynamicEthChannelConfig(lgr, 1*time.Second, gp, blobCfg, calldataCfg) - require.Equal(t, dec.ChannelConfig(), blobCfg) + require.Equal(t, dec.ChannelConfig(false), blobCfg) require.NotNil(t, ch.FindLog( testlog.NewLevelFilter(slog.LevelWarn), testlog.NewMessageContainsFilter("returning last config"), )) gp.err = nil - require.Equal(t, dec.ChannelConfig(), calldataCfg) + require.Equal(t, dec.ChannelConfig(false), calldataCfg) require.NotNil(t, ch.FindLog( testlog.NewLevelFilter(slog.LevelInfo), testlog.NewMessageContainsFilter("calldata"), )) gp.err = errors.New("gp-error-2") - require.Equal(t, dec.ChannelConfig(), calldataCfg) + require.Equal(t, dec.ChannelConfig(false), calldataCfg) require.NotNil(t, ch.FindLog( testlog.NewLevelFilter(slog.LevelWarn), testlog.NewMessageContainsFilter("returning last config"), )) }) } + +func TestComputeSingleCalldataTxCost(t *testing.T) { + // 30KB of data + got := computeSingleCalldataTxCost(120_000, big.NewInt(1), big.NewInt(1), false) + require.Equal(t, big.NewInt(1_002_000), got) // (21_000 + 4*120_000) * (1+1) + + got = computeSingleCalldataTxCost(120_000, big.NewInt(1), big.NewInt(1), true) + require.Equal(t, big.NewInt(2_442_000), got) // (21_000 + 10*120_000) * (1+1) +} + +func TestComputeSingleBlobTxCost(t *testing.T) 
{ + // This tx submits 655KB of data (21x the calldata example above) + // Setting blobBaseFee to 16x (baseFee + tipCap) gives a cost which is ~21x higher + // than the calldata example, showing the rough equilibrium point + // of the two DA markets. + got := computeSingleBlobTxCost(5, big.NewInt(1), big.NewInt(1), big.NewInt(32)) + require.Equal(t, big.NewInt(21_013_520), got) // 21_000 * (1+1) + 131_072*5*32 +} diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 24586f389a1..b02543965f9 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -62,7 +62,7 @@ func NewChannelManager(log log.Logger, metr metrics.Metricer, cfgProvider Channe log: log, metr: metr, cfgProvider: cfgProvider, - defaultCfg: cfgProvider.ChannelConfig(), + defaultCfg: cfgProvider.ChannelConfig(false), rollupCfg: rollupCfg, outFactory: NewChannelOut, txChannels: make(map[string]*channel), @@ -190,7 +190,7 @@ func (s *channelManager) nextTxData(channel *channel) (txData, error) { // It will decide whether to switch DA type automatically. // When switching DA type, the channelManager state will be rebuilt // with a new ChannelConfig. 
-func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) { +func (s *channelManager) TxData(l1Head eth.BlockID, isPectra bool) (txData, error) { channel, err := s.getReadyChannel(l1Head) if err != nil { return emptyTxData, err @@ -202,7 +202,7 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) { } // Call provider method to reassess optimal DA type - newCfg := s.cfgProvider.ChannelConfig() + newCfg := s.cfgProvider.ChannelConfig(isPectra) // No change: if newCfg.UseBlobs == s.defaultCfg.UseBlobs { diff --git a/op-batcher/batcher/channel_manager_test.go b/op-batcher/batcher/channel_manager_test.go index cc15cffc7a1..6a7439bdf2e 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -101,9 +101,9 @@ func ChannelManagerReturnsErrReorgWhenDrained(t *testing.T, batchType uint) { require.NoError(t, m.AddL2Block(a)) - _, err := m.TxData(eth.BlockID{}) + _, err := m.TxData(eth.BlockID{}, false) require.NoError(t, err) - _, err = m.TxData(eth.BlockID{}) + _, err = m.TxData(eth.BlockID{}, false) require.ErrorIs(t, err, io.EOF) require.ErrorIs(t, m.AddL2Block(x), ErrReorg) @@ -199,7 +199,7 @@ func ChannelManager_TxResend(t *testing.T, batchType uint) { require.NoError(m.AddL2Block(a)) - txdata0, err := m.TxData(eth.BlockID{}) + txdata0, err := m.TxData(eth.BlockID{}, false) require.NoError(err) txdata0bytes := txdata0.CallData() data0 := make([]byte, len(txdata0bytes)) @@ -207,13 +207,13 @@ func ChannelManager_TxResend(t *testing.T, batchType uint) { copy(data0, txdata0bytes) // ensure channel is drained - _, err = m.TxData(eth.BlockID{}) + _, err = m.TxData(eth.BlockID{}, false) require.ErrorIs(err, io.EOF) // requeue frame m.TxFailed(txdata0.ID()) - txdata1, err := m.TxData(eth.BlockID{}) + txdata1, err := m.TxData(eth.BlockID{}, false) require.NoError(err) data1 := txdata1.CallData() @@ -276,7 +276,7 @@ type FakeDynamicEthChannelConfig struct { assessments int } -func (f 
*FakeDynamicEthChannelConfig) ChannelConfig() ChannelConfig { +func (f *FakeDynamicEthChannelConfig) ChannelConfig(isPectra bool) ChannelConfig { f.assessments++ if f.chooseBlobs { return f.blobConfig @@ -356,7 +356,7 @@ func TestChannelManager_TxData(t *testing.T) { m.blocks = []*types.Block{blockA} // Call TxData a first time to trigger blocks->channels pipeline - _, err := m.TxData(eth.BlockID{}) + _, err := m.TxData(eth.BlockID{}, false) require.ErrorIs(t, err, io.EOF) // The test requires us to have something in the channel queue @@ -375,7 +375,7 @@ func TestChannelManager_TxData(t *testing.T) { var data txData for { m.blocks = append(m.blocks, blockA) - data, err = m.TxData(eth.BlockID{}) + data, err = m.TxData(eth.BlockID{}, false) if err == nil && data.Len() > 0 { break } diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 53e987d3082..fb791d4d216 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -426,11 +426,10 @@ func (l *BatchSubmitter) syncAndPrune(syncStatus *eth.SyncStatus) *inclusiveBloc // - drives the creation of channels and frames // - sends transactions to the DA layer func (l *BatchSubmitter) mainLoop(ctx context.Context, receiptsCh chan txmgr.TxReceipt[txRef], receiptsLoopCancel, throttlingLoopCancel context.CancelFunc) { - defer l.wg.Done() - defer receiptsLoopCancel() - defer throttlingLoopCancel() - queue := txmgr.NewQueue[txRef](l.killCtx, l.Txmgr, l.Config.MaxPendingTransactions) + queueCtx, queueCancel := context.WithCancel(l.killCtx) + + queue := txmgr.NewQueue[txRef](queueCtx, l.Txmgr, l.Config.MaxPendingTransactions) daGroup := &errgroup.Group{} // errgroup with limit of 0 means no goroutine is able to run concurrently, // so we only set the limit if it is greater than 0. 
@@ -476,9 +475,15 @@ func (l *BatchSubmitter) mainLoop(ctx context.Context, receiptsCh chan txmgr.TxR l.publishStateToL1(queue, receiptsCh, daGroup, l.Config.PollInterval) case <-ctx.Done(): + queueCancel() if err := queue.Wait(); err != nil { - l.Log.Error("error waiting for transactions to complete", "err", err) + if !errors.Is(err, context.Canceled) { + l.Log.Error("error waiting for transactions to complete", "err", err) + } } + throttlingLoopCancel() + receiptsLoopCancel() + l.wg.Done() l.Log.Warn("main loop returning") return } @@ -521,7 +526,7 @@ func (l *BatchSubmitter) throttlingLoop(ctx context.Context) { defer ticker.Stop() updateParams := func(pendingBytes int64) { - ctx, cancel := context.WithTimeout(l.shutdownCtx, l.Config.NetworkTimeout) + ctx, cancel := context.WithTimeout(ctx, l.Config.NetworkTimeout) defer cancel() cl, err := l.EndpointProvider.EthClient(ctx) if err != nil { @@ -542,9 +547,16 @@ func (l *BatchSubmitter) throttlingLoop(ctx context.Context) { success bool rpcErr rpc.Error ) - if err := cl.Client().CallContext( + err = cl.Client().CallContext( ctx, &success, SetMaxDASizeMethod, hexutil.Uint64(maxTxSize), hexutil.Uint64(maxBlockSize), - ); errors.As(err, &rpcErr) && eth.ErrorCode(rpcErr.ErrorCode()).IsGenericRPCError() { + ) + if errors.Is(ctx.Err(), context.Canceled) { + // If the context was cancelled, our work is done and we expect an error here: + // So log it quietly and exit. + l.Log.Debug("DA throttling context cancelled") + return + } + if errors.As(err, &rpcErr) && eth.ErrorCode(rpcErr.ErrorCode()).IsGenericRPCError() { l.Log.Error("SetMaxDASize rpc unavailable or broken, shutting down. Either enable it or disable throttling.", "err", err) // We'd probably hit this error right after startup, so a short shutdown duration should suffice. 
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -602,7 +614,7 @@ func (l *BatchSubmitter) waitNodeSync() error { cCtx, cancel := context.WithTimeout(ctx, l.Config.NetworkTimeout) defer cancel() - l1Tip, err := l.l1Tip(cCtx) + l1Tip, _, err := l.l1Tip(cCtx) if err != nil { return fmt.Errorf("failed to retrieve l1 tip: %w", err) } @@ -700,7 +712,7 @@ func (l *BatchSubmitter) clearState(ctx context.Context) { func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) error { // send all available transactions - l1tip, err := l.l1Tip(ctx) + l1tip, isPectra, err := l.l1Tip(ctx) if err != nil { l.Log.Error("Failed to query L1 tip", "err", err) return err @@ -710,7 +722,7 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t // Collect next transaction data. This pulls data out of the channel, so we need to make sure // to put it back if ever da or txmgr requests fail, by calling l.recordFailedDARequest/recordFailedTx. l.channelMgrMutex.Lock() - txdata, err := l.channelMgr.TxData(l1tip.ID()) + txdata, err := l.channelMgr.TxData(l1tip.ID(), isPectra) l.channelMgrMutex.Unlock() if err == io.EOF { @@ -847,7 +859,7 @@ func (l *BatchSubmitter) sendTransaction(txdata txData, queue *txmgr.Queue[txRef // sendTx uses the txmgr queue to send the given transaction candidate after setting its // gaslimit. It will block if the txmgr queue has reached its MaxPendingTransactions limit. 
func (l *BatchSubmitter) sendTx(txdata txData, isCancel bool, candidate *txmgr.TxCandidate, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) { - intrinsicGas, err := core.IntrinsicGas(candidate.TxData, nil, false, true, true, false) + intrinsicGas, err := core.IntrinsicGas(candidate.TxData, nil, nil, false, true, true, false) if err != nil { // we log instead of return an error here because txmgr can do its own gas estimation l.Log.Error("Failed to calculate intrinsic gas", "err", err) @@ -917,14 +929,17 @@ func (l *BatchSubmitter) recordConfirmedTx(id txID, receipt *types.Receipt) { // l1Tip gets the current L1 tip as a L1BlockRef. The passed context is assumed // to be a lifetime context, so it is internally wrapped with a network timeout. -func (l *BatchSubmitter) l1Tip(ctx context.Context) (eth.L1BlockRef, error) { +// It also returns a boolean indicating if the tip is from a Pectra chain. +func (l *BatchSubmitter) l1Tip(ctx context.Context) (eth.L1BlockRef, bool, error) { tctx, cancel := context.WithTimeout(ctx, l.Config.NetworkTimeout) defer cancel() head, err := l.L1Client.HeaderByNumber(tctx, nil) + if err != nil { - return eth.L1BlockRef{}, fmt.Errorf("getting latest L1 block: %w", err) + return eth.L1BlockRef{}, false, fmt.Errorf("getting latest L1 block: %w", err) } - return eth.InfoToL1BlockRef(eth.HeaderBlockInfo(head)), nil + isPectra := head.RequestsHash != nil // See https://eips.ethereum.org/EIPS/eip-7685 + return eth.InfoToL1BlockRef(eth.HeaderBlockInfo(head)), isPectra, nil } func (l *BatchSubmitter) checkTxpool(queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) bool { diff --git a/op-batcher/batcher/test_batch_submitter.go b/op-batcher/batcher/test_batch_submitter.go index 1d8435e1a36..93083aa0dc6 100644 --- a/op-batcher/batcher/test_batch_submitter.go +++ b/op-batcher/batcher/test_batch_submitter.go @@ -27,13 +27,13 @@ func (l *TestBatchSubmitter) JamTxPool(ctx context.Context) error { } var candidate 
*txmgr.TxCandidate var err error - cc := l.channelMgr.cfgProvider.ChannelConfig() + cc := l.channelMgr.cfgProvider.ChannelConfig(true) if cc.UseBlobs { candidate = l.calldataTxCandidate([]byte{}) } else if candidate, err = l.blobTxCandidate(emptyTxData); err != nil { return err } - if candidate.GasLimit, err = core.IntrinsicGas(candidate.TxData, nil, false, true, true, false); err != nil { + if candidate.GasLimit, err = core.IntrinsicGas(candidate.TxData, nil, nil, false, true, true, false); err != nil { return err } diff --git a/op-chain-ops/cmd/check-derivation/main.go b/op-chain-ops/cmd/check-derivation/main.go index 499b128f876..08d52cca6a2 100644 --- a/op-chain-ops/cmd/check-derivation/main.go +++ b/op-chain-ops/cmd/check-derivation/main.go @@ -225,7 +225,7 @@ func getRandomSignedTransaction(ctx context.Context, ethClient *ethclient.Client var txData types.TxData switch txType { case types.LegacyTxType: - gasLimit, err := core.IntrinsicGas(data, nil, false, true, true, false) + gasLimit, err := core.IntrinsicGas(data, nil, nil, false, true, true, false) if err != nil { return nil, fmt.Errorf("failed to get intrinsicGas: %w", err) } @@ -242,7 +242,7 @@ func getRandomSignedTransaction(ctx context.Context, ethClient *ethclient.Client Address: randomAddress, StorageKeys: []common.Hash{common.HexToHash("0x1234")}, }} - gasLimit, err := core.IntrinsicGas(data, accessList, false, true, true, false) + gasLimit, err := core.IntrinsicGas(data, accessList, nil, false, true, true, false) if err != nil { return nil, fmt.Errorf("failed to get intrinsicGas: %w", err) } @@ -257,7 +257,7 @@ func getRandomSignedTransaction(ctx context.Context, ethClient *ethclient.Client Data: data, } case types.DynamicFeeTxType: - gasLimit, err := core.IntrinsicGas(data, nil, false, true, true, false) + gasLimit, err := core.IntrinsicGas(data, nil, nil, false, true, true, false) if err != nil { return nil, fmt.Errorf("failed to get intrinsicGas: %w", err) } diff --git 
a/op-chain-ops/cmd/op-run-block/main.go b/op-chain-ops/cmd/op-run-block/main.go index f921689390b..5738fb7ddff 100644 --- a/op-chain-ops/cmd/op-run-block/main.go +++ b/op-chain-ops/cmd/op-run-block/main.go @@ -150,7 +150,6 @@ func mainAction(c *cli.Context) error { DisableStack: false, DisableStorage: false, EnableReturnData: false, - Debug: false, Limit: 0, Overrides: nil, }, outW) @@ -323,12 +322,12 @@ func Process(logger log.Logger, config *params.ChainConfig, signer = types.MakeSigner(config, header.Number, header.Time) ) blockContext = core.NewEVMBlockContext(header, chainCtx, nil, config, statedb) - vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) + vmenv := vm.NewEVM(blockContext, statedb, config, cfg) if beaconRoot := block.ParentBeaconRoot; beaconRoot != nil { - core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb) + core.ProcessBeaconBlockRoot(*beaconRoot, vmenv) } if config.IsPrague(blockNumber, uint64(block.Time)) { - core.ProcessParentBlockHash(block.ParentHash, vmenv, statedb) + core.ProcessParentBlockHash(block.ParentHash, vmenv) } logger.Info("Prepared EVM state") _, _ = fmt.Fprintf(outW, "# Prepared state\n") @@ -343,7 +342,7 @@ func Process(logger log.Logger, config *params.ChainConfig, } statedb.SetTxContext(tx.Hash(), i) - receipt, err := core.ApplyTransactionWithEVM(msg, config, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv) + receipt, err := core.ApplyTransactionWithEVM(msg, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv) if err != nil { return nil, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } diff --git a/op-chain-ops/cmd/op-simulate/main.go b/op-chain-ops/cmd/op-simulate/main.go index 83e0a7adc50..9c67c9529f1 100644 --- a/op-chain-ops/cmd/op-simulate/main.go +++ b/op-chain-ops/cmd/op-simulate/main.go @@ -158,7 +158,6 @@ func fetchPrestate(ctx context.Context, cl *rpc.Client, dir string, txHash commo DisableStack: true, DisableStorage: true, EnableReturnData: false, - 
Debug: false, Limit: 0, Overrides: nil, }, @@ -269,7 +268,7 @@ func simulate(ctx context.Context, logger log.Logger, conf *params.ChainConfig, } // load prestate data into memory db state - _, err = state.Commit(header.Number.Uint64()-1, true) + _, err = state.Commit(header.Number.Uint64()-1, true, conf.IsCancun(header.Number, header.Time)) if err != nil { return fmt.Errorf("failed to write state data to underlying DB: %w", err) } @@ -297,7 +296,10 @@ func simulate(ctx context.Context, logger log.Logger, conf *params.ChainConfig, // run the transaction start := time.Now() - receipt, err := core.ApplyTransaction(conf, cCtx, &sender, &gp, state, header, tx, &usedGas, vmConfig) + // nil block-author, since it defaults to header.coinbase + blockCtx := core.NewEVMBlockContext(header, cCtx, nil, conf, state) + evm := vm.NewEVM(blockCtx, state, conf, vmConfig) + receipt, err := core.ApplyTransaction(evm, &gp, state, header, tx, &usedGas) if err != nil { return fmt.Errorf("failed to apply tx: %w", err) } diff --git a/op-chain-ops/foundry/allocs_test.go b/op-chain-ops/foundry/allocs_test.go index 278393f42a3..4bb9ed4d4b5 100644 --- a/op-chain-ops/foundry/allocs_test.go +++ b/op-chain-ops/foundry/allocs_test.go @@ -58,7 +58,7 @@ func TestForgeAllocs_FromState(t *testing.T) { // Commit and make a new state, we cannot reuse the state after Commit // (see doc-comment in Commit, absolute footgun) - root, err := st.Commit(0, false) + root, err := st.Commit(0, false, false) require.NoError(t, err) st, err = state.New(root, stateDB) require.NoError(t, err) @@ -66,7 +66,7 @@ func TestForgeAllocs_FromState(t *testing.T) { st.SetState(contract, common.Hash{0: 0xa}, common.Hash{0: 1}) st.SetState(contract, crypto.Keccak256Hash([]byte("hello")), crypto.Keccak256Hash([]byte("world"))) - root, err = st.Commit(0, false) + root, err = st.Commit(0, false, false) require.NoError(t, err) st, err = state.New(root, stateDB) require.NoError(t, err) diff --git a/op-chain-ops/genesis/config.go 
b/op-chain-ops/genesis/config.go index ddb15a296a6..3539ca9b66b 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -354,6 +354,8 @@ type UpgradeScheduleDeployConfig struct { // When Cancun activates. Relative to L1 genesis. L1CancunTimeOffset *hexutil.Uint64 `json:"l1CancunTimeOffset,omitempty"` + // When Prague activates. Relative to L1 genesis. + L1PragueTimeOffset *hexutil.Uint64 `json:"l1PragueTimeOffset,omitempty"` // UseInterop is a flag that indicates if the system is using interop UseInterop bool `json:"useInterop,omitempty"` diff --git a/op-chain-ops/genesis/genesis.go b/op-chain-ops/genesis/genesis.go index b7d94f40074..4be4866aa5c 100644 --- a/op-chain-ops/genesis/genesis.go +++ b/op-chain-ops/genesis/genesis.go @@ -167,6 +167,10 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) { cancunTime := uint64(timestamp) + uint64(*config.L1CancunTimeOffset) chainConfig.CancunTime = &cancunTime } + if config.L1PragueTimeOffset != nil { + pragueTime := uint64(timestamp) + uint64(*config.L1PragueTimeOffset) + chainConfig.PragueTime = &pragueTime + } return &core.Genesis{ Config: &chainConfig, diff --git a/op-chain-ops/script/forking/reader.go b/op-chain-ops/script/forking/reader.go index d943ddf1444..ab4b9f5ba6a 100644 --- a/op-chain-ops/script/forking/reader.go +++ b/op-chain-ops/script/forking/reader.go @@ -31,6 +31,14 @@ func (f *forkStateReader) Storage(addr common.Address, slot common.Hash) (common return common.Hash(v), nil } +func (f *forkStateReader) Code(addr common.Address, codeHash common.Hash) ([]byte, error) { + return f.trie.ContractCode(addr, codeHash) +} + +func (f *forkStateReader) CodeSize(addr common.Address, codeHash common.Hash) (int, error) { + return f.trie.ContractCodeSize(addr, codeHash) +} + func (f *forkStateReader) Copy() state.Reader { return f } diff --git a/op-chain-ops/script/forking/state.go b/op-chain-ops/script/forking/state.go index e59b1c4bc29..de628712f5c 100644 --- 
a/op-chain-ops/script/forking/state.go +++ b/op-chain-ops/script/forking/state.go @@ -278,8 +278,8 @@ func (fst *ForkableState) GetCode(address common.Address) []byte { return fst.stateFor(address).GetCode(address) } -func (fst *ForkableState) SetCode(address common.Address, bytes []byte) { - fst.stateFor(address).SetCode(address, bytes) +func (fst *ForkableState) SetCode(address common.Address, bytes []byte) []byte { + return fst.stateFor(address).SetCode(address, bytes) } func (fst *ForkableState) GetCodeSize(address common.Address) int { diff --git a/op-chain-ops/script/script.go b/op-chain-ops/script/script.go index dce140f7cf4..e933779339c 100644 --- a/op-chain-ops/script/script.go +++ b/op-chain-ops/script/script.go @@ -289,7 +289,8 @@ func NewHost( CallerOverride: h.handleCaller, } - h.env = vm.NewEVM(blockContext, txContext, h.state, h.chainCfg, vmCfg) + h.env = vm.NewEVM(blockContext, h.state, h.chainCfg, vmCfg) + h.env.SetTxContext(txContext) return h } @@ -697,7 +698,7 @@ func (h *Host) StateDump() (*foundry.ForgeAllocs, error) { baseState := h.baseState // We have to commit the existing state to the trie, // for all the state-changes to be captured by the trie iterator. 
- root, err := baseState.Commit(h.env.Context.BlockNumber.Uint64(), true) + root, err := baseState.Commit(h.env.Context.BlockNumber.Uint64(), true, false) if err != nil { return nil, fmt.Errorf("failed to commit state: %w", err) } diff --git a/op-challenger/cmd/run_trace.go b/op-challenger/cmd/run_trace.go index 7c52d0de13b..07e74fdd79d 100644 --- a/op-challenger/cmd/run_trace.go +++ b/op-challenger/cmd/run_trace.go @@ -101,9 +101,13 @@ func parseRunArg(arg string) (runner.RunConfig, error) { cfg.Name = cfg.TraceType.String() } if len(opts) > 2 { - cfg.Prestate = common.HexToHash(opts[2]) - if cfg.Prestate == (common.Hash{}) { - return runner.RunConfig{}, fmt.Errorf("%w %q for run config %q", ErrInvalidPrestateHash, opts[2], arg) + if strings.HasPrefix(opts[2], "0x") { + cfg.Prestate = common.HexToHash(opts[2]) + if cfg.Prestate == (common.Hash{}) { + return runner.RunConfig{}, fmt.Errorf("%w %q for run config %q", ErrInvalidPrestateHash, opts[2], arg) + } + } else { + cfg.PrestateFilename = opts[2] } } return cfg, nil diff --git a/op-challenger/cmd/run_trace_test.go b/op-challenger/cmd/run_trace_test.go index 78e047827dd..0e2512dd1a1 100644 --- a/op-challenger/cmd/run_trace_test.go +++ b/op-challenger/cmd/run_trace_test.go @@ -21,7 +21,8 @@ func TestParseRunArg(t *testing.T) { {arg: "asterisc", expected: runner.RunConfig{TraceType: types.TraceTypeAsterisc, Name: types.TraceTypeAsterisc.String()}}, {arg: "cannon/test1", expected: runner.RunConfig{TraceType: types.TraceTypeCannon, Name: "test1"}}, {arg: "cannon/test1/0x1234", expected: runner.RunConfig{TraceType: types.TraceTypeCannon, Name: "test1", Prestate: common.HexToHash("0x1234")}}, - {arg: "cannon/test1/invalid", err: ErrInvalidPrestateHash}, + {arg: "cannon/test1/0xinvalid", err: ErrInvalidPrestateHash}, + {arg: "cannon/test1/develop.bin.gz", expected: runner.RunConfig{TraceType: types.TraceTypeCannon, Name: "test1", PrestateFilename: "develop.bin.gz"}}, } for _, test := range tests { test := test diff 
--git a/op-challenger/game/fault/trace/super/provider.go b/op-challenger/game/fault/trace/super/provider.go index 58268fe41ca..4fe60a9f9b7 100644 --- a/op-challenger/game/fault/trace/super/provider.go +++ b/op-challenger/game/fault/trace/super/provider.go @@ -32,11 +32,13 @@ type PreimagePrestateProvider interface { } type RootProvider interface { SuperRootAtTimestamp(ctx context.Context, timestamp hexutil.Uint64) (eth.SuperRootResponse, error) + AllSafeDerivedAt(ctx context.Context, derivedFrom eth.BlockID) (map[eth.ChainID]eth.BlockID, error) } type SuperTraceProvider struct { PreimagePrestateProvider logger log.Logger + rollupCfgs *RollupConfigs rootProvider RootProvider prestateTimestamp uint64 poststateTimestamp uint64 @@ -44,10 +46,11 @@ type SuperTraceProvider struct { gameDepth types.Depth } -func NewSuperTraceProvider(logger log.Logger, prestateProvider PreimagePrestateProvider, rootProvider RootProvider, l1Head eth.BlockID, gameDepth types.Depth, prestateTimestamp, poststateTimestamp uint64) *SuperTraceProvider { +func NewSuperTraceProvider(logger log.Logger, rollupCfgs *RollupConfigs, prestateProvider PreimagePrestateProvider, rootProvider RootProvider, l1Head eth.BlockID, gameDepth types.Depth, prestateTimestamp, poststateTimestamp uint64) *SuperTraceProvider { return &SuperTraceProvider{ - PreimagePrestateProvider: prestateProvider, logger: logger, + rollupCfgs: rollupCfgs, + PreimagePrestateProvider: prestateProvider, rootProvider: rootProvider, prestateTimestamp: prestateTimestamp, poststateTimestamp: poststateTimestamp, @@ -76,6 +79,9 @@ func (s *SuperTraceProvider) GetPreimageBytes(ctx context.Context, pos types.Pos if err != nil { return nil, fmt.Errorf("failed to retrieve super root at timestamp %v: %w", timestamp, err) } + if root.CrossSafeDerivedFrom.Number > s.l1Head.Number { + return InvalidTransition, nil + } return responseToSuper(root).Marshal(), nil } // Fetch the super root at the next timestamp since we are part way through the 
transition to it @@ -83,11 +89,25 @@ func (s *SuperTraceProvider) GetPreimageBytes(ctx context.Context, pos types.Pos if err != nil { return nil, fmt.Errorf("failed to retrieve super root at timestamp %v: %w", timestamp, err) } + if prevRoot.CrossSafeDerivedFrom.Number > s.l1Head.Number { + // The previous root was not safe at the game L1 head so we must have already transitioned to the invalid hash + // prior to this step and it then repeats forever. + return InvalidTransition, nil + } nextTimestamp := timestamp + 1 nextRoot, err := s.rootProvider.SuperRootAtTimestamp(ctx, hexutil.Uint64(nextTimestamp)) if err != nil { return nil, fmt.Errorf("failed to retrieve super root at timestamp %v: %w", nextTimestamp, err) } + + var safeHeads map[eth.ChainID]eth.BlockID + // If the next root is not cross safe, fetch the individual chain safe heads at L1Head to find which chains were unsafe + if nextRoot.CrossSafeDerivedFrom.Number > s.l1Head.Number { + safeHeads, err = s.rootProvider.AllSafeDerivedAt(ctx, s.l1Head) + if err != nil { + return nil, fmt.Errorf("failed to retrieve safe derived blocks at L1 head %v: %w", s.l1Head, err) + } + } superV1 := responseToSuper(prevRoot) expectedState := interopTypes.TransitionState{ SuperRoot: superV1.Marshal(), @@ -95,7 +115,24 @@ func (s *SuperTraceProvider) GetPreimageBytes(ctx context.Context, pos types.Pos Step: step, } for i := uint64(0); i < min(step, uint64(len(nextRoot.Chains))); i++ { - rawOutput, err := eth.UnmarshalOutput(nextRoot.Chains[i].Pending) + chainInfo := nextRoot.Chains[i] + if len(safeHeads) > 0 { + // Need to check if this chain's safe head included the block we need + rollupCfg, ok := s.rollupCfgs.Get(chainInfo.ChainID) + if !ok { + return nil, fmt.Errorf("rollup config unavailable for chain %d: %w", chainInfo.ChainID, err) + } + requiredBlockNum, err := rollupCfg.TargetBlockNumber(nextTimestamp) + if err != nil { + return nil, fmt.Errorf("no target block number for chain %v at %v: %w", chainInfo.ChainID, 
nextTimestamp, err) + } + if safeHead, ok := safeHeads[chainInfo.ChainID]; !ok { + return nil, fmt.Errorf("no safe head known for chain %v at %v: %w", chainInfo.ChainID, nextTimestamp, err) + } else if safeHead.Number < requiredBlockNum { + return InvalidTransition, nil + } + } + rawOutput, err := eth.UnmarshalOutput(chainInfo.Pending) if err != nil { return nil, fmt.Errorf("failed to unmarshal pending output %v at timestamp %v: %w", i, nextTimestamp, err) } diff --git a/op-challenger/game/fault/trace/super/provider_test.go b/op-challenger/game/fault/trace/super/provider_test.go index db6729a96d4..422c073c9e7 100644 --- a/op-challenger/game/fault/trace/super/provider_test.go +++ b/op-challenger/game/fault/trace/super/provider_test.go @@ -7,7 +7,9 @@ import ( "math/rand" "testing" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-node/rollup" interopTypes "github.com/ethereum-optimism/optimism/op-program/client/interop/types" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" @@ -27,10 +29,11 @@ var ( func TestGet(t *testing.T) { t.Run("AtPostState", func(t *testing.T) { - provider, stubSupervisor := createProvider(t) + provider, stubSupervisor, l1Head, _ := createProvider(t) response := eth.SuperRootResponse{ - Timestamp: poststateTimestamp, - SuperRoot: eth.Bytes32{0xaa}, + CrossSafeDerivedFrom: l1Head, + Timestamp: poststateTimestamp, + SuperRoot: eth.Bytes32{0xaa}, Chains: []eth.ChainRootInfo{ { ChainID: eth.ChainIDFromUInt64(1), @@ -47,10 +50,11 @@ func TestGet(t *testing.T) { }) t.Run("AtNewTimestamp", func(t *testing.T) { - provider, stubSupervisor := createProvider(t) + provider, stubSupervisor, l1Head, _ := createProvider(t) response := eth.SuperRootResponse{ - Timestamp: prestateTimestamp + 1, - SuperRoot: eth.Bytes32{0xaa}, + CrossSafeDerivedFrom: l1Head, 
+ Timestamp: prestateTimestamp + 1, + SuperRoot: eth.Bytes32{0xaa}, Chains: []eth.ChainRootInfo{ { ChainID: eth.ChainIDFromUInt64(1), @@ -66,115 +70,174 @@ func TestGet(t *testing.T) { require.Equal(t, common.Hash(eth.SuperRoot(expected)), claim) }) - t.Run("FirstTimestamp", func(t *testing.T) { - rng := rand.New(rand.NewSource(1)) - provider, stubSupervisor := createProvider(t) - outputA1 := testutils.RandomOutputV0(rng) - outputA2 := testutils.RandomOutputV0(rng) - outputB1 := testutils.RandomOutputV0(rng) - outputB2 := testutils.RandomOutputV0(rng) - superRoot1 := eth.NewSuperV1( - prestateTimestamp, - eth.ChainIDAndOutput{ChainID: eth.ChainIDFromUInt64(1), Output: eth.OutputRoot(outputA1)}, - eth.ChainIDAndOutput{ChainID: eth.ChainIDFromUInt64(2), Output: eth.OutputRoot(outputB1)}) - superRoot2 := eth.NewSuperV1(prestateTimestamp+1, - eth.ChainIDAndOutput{ChainID: eth.ChainIDFromUInt64(1), Output: eth.OutputRoot(outputA2)}, - eth.ChainIDAndOutput{ChainID: eth.ChainIDFromUInt64(2), Output: eth.OutputRoot(outputB2)}) - stubSupervisor.Add(eth.SuperRootResponse{ - Timestamp: prestateTimestamp, - SuperRoot: eth.SuperRoot(superRoot1), + t.Run("ValidTransitionBetweenFirstTwoSuperRoots", func(t *testing.T) { + provider, stubSupervisor, l1Head, _ := createProvider(t) + prev, next := createValidSuperRoots(l1Head) + stubSupervisor.Add(prev.response) + stubSupervisor.Add(next.response) + + expectValidTransition(t, provider, prev, next) + }) + + t.Run("Step0SuperRootIsSafeBeforeGameL1Head", func(t *testing.T) { + provider, stubSupervisor, l1Head, _ := createProvider(t) + response := eth.SuperRootResponse{ + CrossSafeDerivedFrom: eth.BlockID{Number: l1Head.Number - 10, Hash: common.Hash{0xcc}}, + Timestamp: poststateTimestamp, + SuperRoot: eth.Bytes32{0xaa}, Chains: []eth.ChainRootInfo{ { ChainID: eth.ChainIDFromUInt64(1), - Canonical: eth.OutputRoot(outputA1), - Pending: outputA1.Marshal(), - }, - { - ChainID: eth.ChainIDFromUInt64(2), - Canonical: eth.OutputRoot(outputB1), 
- Pending: outputB1.Marshal(), + Canonical: eth.Bytes32{0xbb}, + Pending: []byte{0xcc}, }, }, - }) - stubSupervisor.Add(eth.SuperRootResponse{ - Timestamp: prestateTimestamp + 1, - SuperRoot: eth.SuperRoot(superRoot2), + } + stubSupervisor.Add(response) + claim, err := provider.Get(context.Background(), types.RootPosition) + require.NoError(t, err) + expected := responseToSuper(response) + require.Equal(t, common.Hash(eth.SuperRoot(expected)), claim) + }) + + t.Run("Step0SuperRootNotSafeAtGameL1Head", func(t *testing.T) { + provider, stubSupervisor, l1Head, _ := createProvider(t) + response := eth.SuperRootResponse{ + CrossSafeDerivedFrom: eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xaa}}, + Timestamp: poststateTimestamp, + SuperRoot: eth.Bytes32{0xaa}, Chains: []eth.ChainRootInfo{ { ChainID: eth.ChainIDFromUInt64(1), - Canonical: eth.OutputRoot(outputA2), - Pending: outputA2.Marshal(), - }, - { - ChainID: eth.ChainIDFromUInt64(1), - Canonical: eth.OutputRoot(outputB2), - Pending: outputB2.Marshal(), + Canonical: eth.Bytes32{0xbb}, + Pending: []byte{0xcc}, }, }, - }) + } + stubSupervisor.Add(response) + claim, err := provider.Get(context.Background(), types.RootPosition) + require.NoError(t, err) + require.Equal(t, InvalidTransitionHash, claim) + }) - expectedFirstStep := &interopTypes.TransitionState{ - SuperRoot: superRoot1.Marshal(), - PendingProgress: []interopTypes.OptimisticBlock{ - {BlockHash: outputA2.BlockHash, OutputRoot: eth.OutputRoot(outputA2)}, - }, - Step: 1, + t.Run("NextSuperRootSafeBeforeGameL1Head", func(t *testing.T) { + provider, stubSupervisor, l1Head, _ := createProvider(t) + prev, next := createValidSuperRoots(l1Head) + // Make super roots be safe earlier + prev.response.CrossSafeDerivedFrom = eth.BlockID{Number: l1Head.Number - 10, Hash: common.Hash{0xaa}} + next.response.CrossSafeDerivedFrom = eth.BlockID{Number: l1Head.Number - 5, Hash: common.Hash{0xbb}} + stubSupervisor.Add(prev.response) + 
stubSupervisor.Add(next.response) + expectValidTransition(t, provider, prev, next) + }) + + t.Run("PreviousSuperRootNotSafeAtGameL1Head", func(t *testing.T) { + provider, stubSupervisor, l1Head, _ := createProvider(t) + prev, next := createValidSuperRoots(l1Head) + // Make super roots be safe only after L1 head + prev.response.CrossSafeDerivedFrom = eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xaa}} + next.response.CrossSafeDerivedFrom = eth.BlockID{Number: l1Head.Number + 2, Hash: common.Hash{0xbb}} + stubSupervisor.Add(prev.response) + stubSupervisor.Add(next.response) + + // All steps should be the invalid transition hash. + for i := int64(0); i < StepsPerTimestamp+1; i++ { + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(i))) + require.NoError(t, err) + require.Equalf(t, InvalidTransitionHash, claim, "incorrect claim at index %d", i) } - claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(0))) + }) + + t.Run("FirstChainUnsafe", func(t *testing.T) { + provider, stubSupervisor, l1Head, rollupCfgs := createProvider(t) + prev, next := createValidSuperRoots(l1Head) + // Make super roots be safe only after L1 head + prev.response.CrossSafeDerivedFrom = eth.BlockID{Number: l1Head.Number, Hash: common.Hash{0xaa}} + next.response.CrossSafeDerivedFrom = eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xbb}} + stubSupervisor.Add(prev.response) + stubSupervisor.Add(next.response) + + chain1Cfg, ok := rollupCfgs.Get(eth.ChainIDFromUInt64(1)) + require.True(t, ok) + chain2Cfg, ok := rollupCfgs.Get(eth.ChainIDFromUInt64(2)) + require.True(t, ok) + chain1RequiredBlock, err := chain1Cfg.TargetBlockNumber(prestateTimestamp + 1) require.NoError(t, err) - require.Equal(t, expectedFirstStep.Hash(), claim) + chain2RequiredBlock, err := chain2Cfg.TargetBlockNumber(prestateTimestamp + 1) + require.NoError(t, err) + stubSupervisor.SetAllSafeDerivedAt(l1Head, map[eth.ChainID]eth.BlockID{ 
+ eth.ChainIDFromUInt64(1): {Number: chain1RequiredBlock - 1, Hash: common.Hash{0xcc}}, + eth.ChainIDFromUInt64(2): {Number: chain2RequiredBlock, Hash: common.Hash{0xcc}}, + }) - expectedSecondStep := &interopTypes.TransitionState{ - SuperRoot: superRoot1.Marshal(), - PendingProgress: []interopTypes.OptimisticBlock{ - {BlockHash: outputA2.BlockHash, OutputRoot: eth.OutputRoot(outputA2)}, - {BlockHash: outputB2.BlockHash, OutputRoot: eth.OutputRoot(outputB2)}, - }, - Step: 2, + // All steps should be the invalid transition hash. + for i := int64(0); i < StepsPerTimestamp+1; i++ { + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(i))) + require.NoError(t, err) + require.Equalf(t, InvalidTransitionHash, claim, "incorrect claim at index %d", i) } - claim, err = provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(1))) + }) + t.Run("SecondChainUnsafe", func(t *testing.T) { + provider, stubSupervisor, l1Head, rollupCfgs := createProvider(t) + prev, next := createValidSuperRoots(l1Head) + // Make super roots be safe only after L1 head + prev.response.CrossSafeDerivedFrom = eth.BlockID{Number: l1Head.Number, Hash: common.Hash{0xaa}} + next.response.CrossSafeDerivedFrom = eth.BlockID{Number: l1Head.Number + 1, Hash: common.Hash{0xbb}} + stubSupervisor.Add(prev.response) + stubSupervisor.Add(next.response) + + chain1Cfg, ok := rollupCfgs.Get(eth.ChainIDFromUInt64(1)) + require.True(t, ok) + chain2Cfg, ok := rollupCfgs.Get(eth.ChainIDFromUInt64(2)) + require.True(t, ok) + chain1RequiredBlock, err := chain1Cfg.TargetBlockNumber(prestateTimestamp + 1) require.NoError(t, err) - require.Equal(t, expectedSecondStep.Hash(), claim) - - for step := uint64(3); step < StepsPerTimestamp; step++ { - expectedPaddingStep := &interopTypes.TransitionState{ - SuperRoot: superRoot1.Marshal(), - PendingProgress: []interopTypes.OptimisticBlock{ - {BlockHash: outputA2.BlockHash, OutputRoot: eth.OutputRoot(outputA2)}, - {BlockHash: 
outputB2.BlockHash, OutputRoot: eth.OutputRoot(outputB2)}, - }, - Step: step, - } - claim, err = provider.Get(context.Background(), types.NewPosition(gameDepth, new(big.Int).SetUint64(step-1))) + chain2RequiredBlock, err := chain2Cfg.TargetBlockNumber(prestateTimestamp + 1) + require.NoError(t, err) + stubSupervisor.SetAllSafeDerivedAt(l1Head, map[eth.ChainID]eth.BlockID{ + eth.ChainIDFromUInt64(1): {Number: chain1RequiredBlock, Hash: common.Hash{0xcc}}, + eth.ChainIDFromUInt64(2): {Number: chain2RequiredBlock - 1, Hash: common.Hash{0xcc}}, + }) + + // First step should be valid because we can reach the required block on chain 1 + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(0))) + require.NoError(t, err) + require.NotEqual(t, InvalidTransitionHash, claim, "incorrect claim at index 0") + + // Remaining steps should be the invalid transition hash. + for i := int64(1); i < StepsPerTimestamp+1; i++ { + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(i))) require.NoError(t, err) - require.Equalf(t, expectedPaddingStep.Hash(), claim, "incorrect hash at step %v", step) + require.Equalf(t, InvalidTransitionHash, claim, "incorrect claim at index %d", i) } }) } func TestGetStepDataReturnsError(t *testing.T) { - provider, _ := createProvider(t) + provider, _, _, _ := createProvider(t) _, _, _, err := provider.GetStepData(context.Background(), types.RootPosition) require.ErrorIs(t, err, ErrGetStepData) } func TestGetL2BlockNumberChallengeReturnsError(t *testing.T) { - provider, _ := createProvider(t) + provider, _, _, _ := createProvider(t) _, err := provider.GetL2BlockNumberChallenge(context.Background()) require.ErrorIs(t, err, types.ErrL2BlockNumberValid) } func TestComputeStep(t *testing.T) { t.Run("ErrorWhenTraceIndexTooBig", func(t *testing.T) { + rollupCfgs, err := NewRollupConfigs(vm.Config{}) + require.NoError(t, err) // Uses a big game depth so the trace index doesn't fit in uint64 
- provider := NewSuperTraceProvider(testlog.Logger(t, log.LvlInfo), nil, &stubRootProvider{}, eth.BlockID{}, 65, prestateTimestamp, poststateTimestamp) + provider := NewSuperTraceProvider(testlog.Logger(t, log.LvlInfo), rollupCfgs, nil, &stubRootProvider{}, eth.BlockID{}, 65, prestateTimestamp, poststateTimestamp) // Left-most position in top game - _, _, err := provider.ComputeStep(types.RootPosition) + _, _, err = provider.ComputeStep(types.RootPosition) require.ErrorIs(t, err, ErrIndexTooBig) }) t.Run("FirstTimestampSteps", func(t *testing.T) { - provider, _ := createProvider(t) + provider, _, _, _ := createProvider(t) for i := int64(0); i < StepsPerTimestamp-1; i++ { timestamp, step, err := provider.ComputeStep(types.NewPosition(gameDepth, big.NewInt(i))) require.NoError(t, err) @@ -186,7 +249,7 @@ func TestComputeStep(t *testing.T) { }) t.Run("SecondTimestampSteps", func(t *testing.T) { - provider, _ := createProvider(t) + provider, _, _, _ := createProvider(t) for i := int64(-1); i < StepsPerTimestamp-1; i++ { traceIndex := StepsPerTimestamp + i timestamp, step, err := provider.ComputeStep(types.NewPosition(gameDepth, big.NewInt(traceIndex))) @@ -198,7 +261,7 @@ func TestComputeStep(t *testing.T) { }) t.Run("LimitToPoststateTimestamp", func(t *testing.T) { - provider, _ := createProvider(t) + provider, _, _, _ := createProvider(t) timestamp, step, err := provider.ComputeStep(types.RootPosition) require.NoError(t, err) require.Equal(t, poststateTimestamp, timestamp, "Incorrect timestamp at root position") @@ -206,7 +269,7 @@ func TestComputeStep(t *testing.T) { }) t.Run("StepShouldLoopBackToZero", func(t *testing.T) { - provider, _ := createProvider(t) + provider, _, _, _ := createProvider(t) prevTimestamp := prestateTimestamp prevStep := uint64(0) // Absolute prestate is always on a timestamp boundary, so step 0 for traceIndex := int64(0); traceIndex < 5*StepsPerTimestamp; traceIndex++ { @@ -225,16 +288,143 @@ func TestComputeStep(t *testing.T) { }) } -func 
createProvider(t *testing.T) (*SuperTraceProvider, *stubRootProvider) { +func createProvider(t *testing.T) (*SuperTraceProvider, *stubRootProvider, eth.BlockID, *RollupConfigs) { logger := testlog.Logger(t, log.LvlInfo) + l1Head := eth.BlockID{Number: 23542, Hash: common.Hash{0xab, 0xcd}} stubSupervisor := &stubRootProvider{ rootsByTimestamp: make(map[uint64]eth.SuperRootResponse), } - return NewSuperTraceProvider(logger, nil, stubSupervisor, eth.BlockID{}, gameDepth, prestateTimestamp, poststateTimestamp), stubSupervisor + chain1Cfg := &rollup.Config{ + L2ChainID: big.NewInt(1), + Genesis: rollup.Genesis{ + L2Time: 500, + }, + BlockTime: 1, + } + chain2Cfg := &rollup.Config{ + L2ChainID: big.NewInt(2), + Genesis: rollup.Genesis{ + L2Time: 500, + }, + BlockTime: 1, + } + rollupCfgs, err := NewRollupConfigsFromParsed(chain1Cfg, chain2Cfg) + require.NoError(t, err) + provider := NewSuperTraceProvider(logger, rollupCfgs, nil, stubSupervisor, l1Head, gameDepth, prestateTimestamp, poststateTimestamp) + return provider, stubSupervisor, l1Head, rollupCfgs +} + +type superRootData struct { + response eth.SuperRootResponse + super *eth.SuperV1 + canonical []*eth.OutputV0 + pending []*eth.OutputV0 +} + +func createValidSuperRoots(l1Head eth.BlockID) (superRootData, superRootData) { + rng := rand.New(rand.NewSource(1)) + outputA1 := testutils.RandomOutputV0(rng) + outputA2 := testutils.RandomOutputV0(rng) + outputB1 := testutils.RandomOutputV0(rng) + outputB2 := testutils.RandomOutputV0(rng) + prevSuper := eth.NewSuperV1( + prestateTimestamp, + eth.ChainIDAndOutput{ChainID: eth.ChainIDFromUInt64(1), Output: eth.OutputRoot(outputA1)}, + eth.ChainIDAndOutput{ChainID: eth.ChainIDFromUInt64(2), Output: eth.OutputRoot(outputB1)}) + nextSuper := eth.NewSuperV1(prestateTimestamp+1, + eth.ChainIDAndOutput{ChainID: eth.ChainIDFromUInt64(1), Output: eth.OutputRoot(outputA2)}, + eth.ChainIDAndOutput{ChainID: eth.ChainIDFromUInt64(2), Output: eth.OutputRoot(outputB2)}) + prevResponse := 
eth.SuperRootResponse{ + CrossSafeDerivedFrom: l1Head, + Timestamp: prestateTimestamp, + SuperRoot: eth.SuperRoot(prevSuper), + Chains: []eth.ChainRootInfo{ + { + ChainID: eth.ChainIDFromUInt64(1), + Canonical: eth.OutputRoot(outputA1), + Pending: outputA1.Marshal(), + }, + { + ChainID: eth.ChainIDFromUInt64(2), + Canonical: eth.OutputRoot(outputB1), + Pending: outputB1.Marshal(), + }, + }, + } + nextResponse := eth.SuperRootResponse{ + CrossSafeDerivedFrom: l1Head, + Timestamp: prestateTimestamp + 1, + SuperRoot: eth.SuperRoot(nextSuper), + Chains: []eth.ChainRootInfo{ + { + ChainID: eth.ChainIDFromUInt64(1), + Canonical: eth.OutputRoot(outputA2), + Pending: outputA2.Marshal(), + }, + { + ChainID: eth.ChainIDFromUInt64(2), + Canonical: eth.OutputRoot(outputB2), + Pending: outputB2.Marshal(), + }, + }, + } + prev := superRootData{ + response: prevResponse, + super: prevSuper, + canonical: []*eth.OutputV0{outputA1, outputB1}, + pending: []*eth.OutputV0{outputA1, outputB1}, + } + next := superRootData{ + response: nextResponse, + super: nextSuper, + canonical: []*eth.OutputV0{outputA2, outputB2}, + pending: []*eth.OutputV0{outputA2, outputB2}, + } + return prev, next +} + +func expectValidTransition(t *testing.T, provider *SuperTraceProvider, prev superRootData, next superRootData) { + expectedFirstStep := &interopTypes.TransitionState{ + SuperRoot: prev.super.Marshal(), + PendingProgress: []interopTypes.OptimisticBlock{ + {BlockHash: next.pending[0].BlockHash, OutputRoot: eth.OutputRoot(next.pending[0])}, + }, + Step: 1, + } + claim, err := provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(0))) + require.NoError(t, err) + require.Equal(t, expectedFirstStep.Hash(), claim) + + expectedSecondStep := &interopTypes.TransitionState{ + SuperRoot: prev.super.Marshal(), + PendingProgress: []interopTypes.OptimisticBlock{ + {BlockHash: next.pending[0].BlockHash, OutputRoot: eth.OutputRoot(next.pending[0])}, + {BlockHash: next.pending[1].BlockHash, 
OutputRoot: eth.OutputRoot(next.pending[1])}, + }, + Step: 2, + } + claim, err = provider.Get(context.Background(), types.NewPosition(gameDepth, big.NewInt(1))) + require.NoError(t, err) + require.Equal(t, expectedSecondStep.Hash(), claim) + + for step := uint64(3); step < StepsPerTimestamp; step++ { + expectedPaddingStep := &interopTypes.TransitionState{ + SuperRoot: prev.super.Marshal(), + PendingProgress: []interopTypes.OptimisticBlock{ + {BlockHash: next.pending[0].BlockHash, OutputRoot: eth.OutputRoot(next.pending[0])}, + {BlockHash: next.pending[1].BlockHash, OutputRoot: eth.OutputRoot(next.pending[1])}, + }, + Step: step, + } + claim, err = provider.Get(context.Background(), types.NewPosition(gameDepth, new(big.Int).SetUint64(step-1))) + require.NoError(t, err) + require.Equalf(t, expectedPaddingStep.Hash(), claim, "incorrect hash at step %v", step) + } } type stubRootProvider struct { rootsByTimestamp map[uint64]eth.SuperRootResponse + allSafeDerivedAt map[eth.BlockID]map[eth.ChainID]eth.BlockID } func (s *stubRootProvider) Add(root eth.SuperRootResponse) { @@ -244,6 +434,21 @@ func (s *stubRootProvider) Add(root eth.SuperRootResponse) { s.rootsByTimestamp[root.Timestamp] = root } +func (s *stubRootProvider) SetAllSafeDerivedAt(derivedFrom eth.BlockID, safeHeads map[eth.ChainID]eth.BlockID) { + if s.allSafeDerivedAt == nil { + s.allSafeDerivedAt = make(map[eth.BlockID]map[eth.ChainID]eth.BlockID) + } + s.allSafeDerivedAt[derivedFrom] = safeHeads +} + +func (s *stubRootProvider) AllSafeDerivedAt(_ context.Context, derivedFrom eth.BlockID) (map[eth.ChainID]eth.BlockID, error) { + heads, ok := s.allSafeDerivedAt[derivedFrom] + if !ok { + return nil, fmt.Errorf("no heads found for block %v", derivedFrom) + } + return heads, nil +} + func (s *stubRootProvider) SuperRootAtTimestamp(_ context.Context, timestamp hexutil.Uint64) (eth.SuperRootResponse, error) { root, ok := s.rootsByTimestamp[uint64(timestamp)] if !ok { diff --git 
a/op-challenger/game/fault/trace/super/rollup_configs.go b/op-challenger/game/fault/trace/super/rollup_configs.go new file mode 100644 index 00000000000..878183fe036 --- /dev/null +++ b/op-challenger/game/fault/trace/super/rollup_configs.go @@ -0,0 +1,78 @@ +package super + +import ( + "errors" + "fmt" + "os" + + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-node/chaincfg" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +var ErrDuplicateChain = errors.New("duplicate chain") + +type RollupConfigs struct { + cfgs map[eth.ChainID]*rollup.Config +} + +func NewRollupConfigs(vmCfg vm.Config) (*RollupConfigs, error) { + cfgs := make(map[eth.ChainID]*rollup.Config) + for _, network := range vmCfg.Networks { + cfg, err := chaincfg.GetRollupConfig(network) + if err != nil { + return nil, err + } + if err := addConfig(cfgs, cfg); err != nil { + return nil, err + } + } + for _, path := range vmCfg.RollupConfigPaths { + cfg, err := loadRollupConfig(path) + if err != nil { + return nil, err + } + if err := addConfig(cfgs, cfg); err != nil { + return nil, err + } + } + return &RollupConfigs{ + cfgs: cfgs, + }, nil +} + +func NewRollupConfigsFromParsed(rollupCfgs ...*rollup.Config) (*RollupConfigs, error) { + cfgs := make(map[eth.ChainID]*rollup.Config) + for _, cfg := range rollupCfgs { + if err := addConfig(cfgs, cfg); err != nil { + return nil, err + } + } + return &RollupConfigs{cfgs: cfgs}, nil +} + +func addConfig(cfgs map[eth.ChainID]*rollup.Config, cfg *rollup.Config) error { + chainID := eth.ChainIDFromBig(cfg.L2ChainID) + if _, ok := cfgs[chainID]; ok { + return fmt.Errorf("%w: %v", ErrDuplicateChain, chainID) + } + cfgs[chainID] = cfg + return nil +} + +func (c *RollupConfigs) Get(chainID eth.ChainID) (*rollup.Config, bool) { + cfg, ok := c.cfgs[chainID] + return cfg, ok +} + +func loadRollupConfig(rollupConfigPath string) 
(*rollup.Config, error) { + file, err := os.Open(rollupConfigPath) + if err != nil { + return nil, fmt.Errorf("failed to read rollup config: %w", err) + } + defer file.Close() + + var rollupConfig rollup.Config + return &rollupConfig, rollupConfig.ParseRollupConfig(file) +} diff --git a/op-challenger/game/fault/trace/super/rollup_configs_test.go b/op-challenger/game/fault/trace/super/rollup_configs_test.go new file mode 100644 index 00000000000..e00a59d8528 --- /dev/null +++ b/op-challenger/game/fault/trace/super/rollup_configs_test.go @@ -0,0 +1,128 @@ +package super + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-node/chaincfg" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/stretchr/testify/require" +) + +func TestRollupConfigs(t *testing.T) { + t.Run("LoadNamedNetworks", func(t *testing.T) { + vmCfg := vm.Config{ + Networks: []string{"op-mainnet", "op-sepolia"}, + } + configs, err := NewRollupConfigs(vmCfg) + require.NoError(t, err) + require.Len(t, configs.cfgs, 2) + expectedMainnet, err := chaincfg.GetRollupConfig("op-mainnet") + require.NoError(t, err) + expectedSepolia, err := chaincfg.GetRollupConfig("op-sepolia") + require.NoError(t, err) + actual, ok := configs.Get(eth.ChainIDFromBig(expectedMainnet.L2ChainID)) + require.True(t, ok, "did not load mainnet config") + require.EqualValues(t, expectedMainnet, actual) + actual, ok = configs.Get(eth.ChainIDFromBig(expectedSepolia.L2ChainID)) + require.True(t, ok, "did not load sepolia config") + require.EqualValues(t, expectedSepolia, actual) + }) + + t.Run("LoadConfigFiles", func(t *testing.T) { + expectedMainnet, err := chaincfg.GetRollupConfig("op-mainnet") + require.NoError(t, err) + expectedSepolia, err := chaincfg.GetRollupConfig("op-sepolia") + require.NoError(t, err) + + dir := 
t.TempDir() + writeConfig := func(cfg *rollup.Config) string { + data, err := json.Marshal(cfg) + require.NoError(t, err) + path := filepath.Join(dir, cfg.L2ChainID.String()+".json") + err = os.WriteFile(path, data, 0600) + require.NoError(t, err) + return path + } + + mainnetFile := writeConfig(expectedMainnet) + sepoliaFile := writeConfig(expectedSepolia) + + vmCfg := vm.Config{ + RollupConfigPaths: []string{mainnetFile, sepoliaFile}, + } + configs, err := NewRollupConfigs(vmCfg) + require.NoError(t, err) + require.Len(t, configs.cfgs, 2) + actual, ok := configs.Get(eth.ChainIDFromBig(expectedMainnet.L2ChainID)) + require.True(t, ok, "did not load mainnet config") + require.EqualValues(t, expectedMainnet, actual) + actual, ok = configs.Get(eth.ChainIDFromBig(expectedSepolia.L2ChainID)) + require.True(t, ok, "did not load sepolia config") + require.EqualValues(t, expectedSepolia, actual) + }) + + t.Run("CombineLoadedConfigFiles", func(t *testing.T) { + expectedMainnet, err := chaincfg.GetRollupConfig("op-mainnet") + require.NoError(t, err) + expectedSepolia, err := chaincfg.GetRollupConfig("op-sepolia") + require.NoError(t, err) + + mainnetFile := writeConfig(t, expectedMainnet) + + vmCfg := vm.Config{ + RollupConfigPaths: []string{mainnetFile}, + Networks: []string{"op-sepolia"}, + } + configs, err := NewRollupConfigs(vmCfg) + require.NoError(t, err) + require.Len(t, configs.cfgs, 2) + actual, ok := configs.Get(eth.ChainIDFromBig(expectedMainnet.L2ChainID)) + require.True(t, ok, "did not load mainnet config") + require.EqualValues(t, expectedMainnet, actual) + actual, ok = configs.Get(eth.ChainIDFromBig(expectedSepolia.L2ChainID)) + require.True(t, ok, "did not load sepolia config") + require.EqualValues(t, expectedSepolia, actual) + }) + + t.Run("UnknownConfig", func(t *testing.T) { + cfg, err := NewRollupConfigs(vm.Config{}) + require.NoError(t, err) + _, ok := cfg.Get(eth.ChainIDFromUInt64(4)) + require.False(t, ok) + }) + + 
t.Run("ErrorOnDuplicateConfig-Named", func(t *testing.T) { + _, err := NewRollupConfigs(vm.Config{Networks: []string{"op-mainnet", "op-mainnet"}}) + require.ErrorIs(t, err, ErrDuplicateChain) + }) + + t.Run("ErrorOnDuplicateConfig-NameAndFile", func(t *testing.T) { + expectedMainnet, err := chaincfg.GetRollupConfig("op-mainnet") + require.NoError(t, err) + mainnetPath := writeConfig(t, expectedMainnet) + _, err = NewRollupConfigs(vm.Config{Networks: []string{"op-mainnet"}, RollupConfigPaths: []string{mainnetPath}}) + require.ErrorIs(t, err, ErrDuplicateChain) + }) + + t.Run("ErrorOnDuplicateConfig-Parsed", func(t *testing.T) { + expectedMainnet, err := chaincfg.GetRollupConfig("op-mainnet") + require.NoError(t, err) + _, err = NewRollupConfigsFromParsed(expectedMainnet, expectedMainnet) + require.ErrorIs(t, err, ErrDuplicateChain) + }) +} + +func writeConfig(t *testing.T, cfg *rollup.Config) string { + dir := t.TempDir() + data, err := json.Marshal(cfg) + require.NoError(t, err) + path := filepath.Join(dir, cfg.L2ChainID.String()+".json") + err = os.WriteFile(path, data, 0600) + require.NoError(t, err) + return path +} diff --git a/op-challenger/game/fault/trace/super/split_adapter_test.go b/op-challenger/game/fault/trace/super/split_adapter_test.go index dd0859d68dc..d4c4e653434 100644 --- a/op-challenger/game/fault/trace/super/split_adapter_test.go +++ b/op-challenger/game/fault/trace/super/split_adapter_test.go @@ -113,7 +113,7 @@ func setupSplitAdapterTest(t *testing.T, depth types.Depth, prestateTimestamp ui creator := &capturingCreator{} rootProvider := &stubRootProvider{} prestateProvider := NewSuperRootPrestateProvider(rootProvider, prestateTimestamp) - traceProvider := NewSuperTraceProvider(testlog.Logger(t, log.LvlInfo), prestateProvider, rootProvider, eth.BlockID{}, depth, prestateTimestamp, poststateTimestamp) + traceProvider := NewSuperTraceProvider(testlog.Logger(t, log.LvlInfo), nil, prestateProvider, rootProvider, eth.BlockID{}, depth, 
prestateTimestamp, poststateTimestamp) adapter := SuperRootSplitAdapter(traceProvider, creator.Create) return creator, rootProvider, adapter } diff --git a/op-challenger/game/fault/trace/super/super_cannon.go b/op-challenger/game/fault/trace/super/super_cannon.go index 0ec5a38238e..50545637f7a 100644 --- a/op-challenger/game/fault/trace/super/super_cannon.go +++ b/op-challenger/game/fault/trace/super/super_cannon.go @@ -2,6 +2,7 @@ package super import ( "context" + "fmt" "math/big" "path/filepath" @@ -32,7 +33,11 @@ func NewSuperCannonTraceAccessor( prestateBlock uint64, poststateBlock uint64, ) (*trace.Accessor, error) { - outputProvider := NewSuperTraceProvider(logger, prestateProvider, rootProvider, l1Head, splitDepth, prestateBlock, poststateBlock) + rollupCfgs, err := NewRollupConfigs(cfg) + if err != nil { + return nil, fmt.Errorf("failed to load rollup configs: %w", err) + } + outputProvider := NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, rootProvider, l1Head, splitDepth, prestateBlock, poststateBlock) cannonCreator := func(ctx context.Context, localContext common.Hash, depth types.Depth, claimInfo ClaimInfo) (types.TraceProvider, error) { logger := logger.New("agreedPrestate", claimInfo.AgreedPrestate, "claim", claimInfo.Claim, "localContext", localContext) subdir := filepath.Join(dir, localContext.Hex()) diff --git a/op-challenger/game/fault/types/types.go b/op-challenger/game/fault/types/types.go index bd5cce70dcf..4af481cf800 100644 --- a/op-challenger/game/fault/types/types.go +++ b/op-challenger/game/fault/types/types.go @@ -32,6 +32,7 @@ const ( OPSuccinctGameType GameType = 6 FastGameType GameType = 254 AlphabetGameType GameType = 255 + KailuaGameType GameType = 1337 UnknownGameType GameType = math.MaxUint32 ) @@ -57,6 +58,8 @@ func (t GameType) String() string { return "fast" case AlphabetGameType: return "alphabet" + case KailuaGameType: + return "kailua" default: return fmt.Sprintf("", t) } diff --git 
a/op-challenger/runner/factory.go b/op-challenger/runner/factory.go index 8a9929bd0a4..f53927a1bb6 100644 --- a/op-challenger/runner/factory.go +++ b/op-challenger/runner/factory.go @@ -3,27 +3,27 @@ package runner import ( "context" "errors" - "fmt" "net/url" - "path/filepath" "github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/asterisc" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/cannon" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/prestates" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) +type prestateFetcher interface { + getPrestate(ctx context.Context, logger log.Logger, prestateBaseUrl *url.URL, prestatePath string, dataDir string, stateConverter vm.StateConverter) (string, error) +} + func createTraceProvider( ctx context.Context, logger log.Logger, m vm.Metricer, cfg *config.Config, - prestateHash common.Hash, + prestateSource prestateFetcher, traceType types.TraceType, localInputs utils.LocalGameInputs, dir string, @@ -32,7 +32,7 @@ func createTraceProvider( case types.TraceTypeCannon: serverExecutor := vm.NewOpProgramServerExecutor(logger) stateConverter := cannon.NewStateConverter(cfg.Cannon) - prestate, err := getPrestate(ctx, prestateHash, cfg.CannonAbsolutePreStateBaseURL, cfg.CannonAbsolutePreState, dir, stateConverter) + prestate, err := prestateSource.getPrestate(ctx, logger, cfg.CannonAbsolutePreStateBaseURL, cfg.CannonAbsolutePreState, dir, stateConverter) if err != nil { return nil, err } @@ -41,7 +41,7 @@ func createTraceProvider( case types.TraceTypeAsterisc: serverExecutor := vm.NewOpProgramServerExecutor(logger) stateConverter := 
asterisc.NewStateConverter(cfg.Asterisc) - prestate, err := getPrestate(ctx, prestateHash, cfg.AsteriscAbsolutePreStateBaseURL, cfg.AsteriscAbsolutePreState, dir, stateConverter) + prestate, err := prestateSource.getPrestate(ctx, logger, cfg.AsteriscAbsolutePreStateBaseURL, cfg.AsteriscAbsolutePreState, dir, stateConverter) if err != nil { return nil, err } @@ -50,7 +50,7 @@ func createTraceProvider( case types.TraceTypeAsteriscKona: serverExecutor := vm.NewKonaExecutor() stateConverter := asterisc.NewStateConverter(cfg.Asterisc) - prestate, err := getPrestate(ctx, prestateHash, cfg.AsteriscKonaAbsolutePreStateBaseURL, cfg.AsteriscKonaAbsolutePreState, dir, stateConverter) + prestate, err := prestateSource.getPrestate(ctx, logger, cfg.AsteriscKonaAbsolutePreStateBaseURL, cfg.AsteriscKonaAbsolutePreState, dir, stateConverter) if err != nil { return nil, err } @@ -59,17 +59,3 @@ func createTraceProvider( } return nil, errors.New("invalid trace type") } - -func getPrestate(ctx context.Context, prestateHash common.Hash, prestateBaseUrl *url.URL, prestatePath string, dataDir string, stateConverter vm.StateConverter) (string, error) { - prestateSource := prestates.NewPrestateSource( - prestateBaseUrl, - prestatePath, - filepath.Join(dataDir, "prestates"), - stateConverter) - - prestate, err := prestateSource.PrestatePath(ctx, prestateHash) - if err != nil { - return "", fmt.Errorf("failed to get prestate %v: %w", prestateHash, err) - } - return prestate, nil -} diff --git a/op-challenger/runner/metrics.go b/op-challenger/runner/metrics.go index 4e63d66e7a0..f5f4e3c4ba0 100644 --- a/op-challenger/runner/metrics.go +++ b/op-challenger/runner/metrics.go @@ -31,11 +31,11 @@ var _ Metricer = (*Metrics)(nil) // Metrics implementation must implement RegistryMetricer to allow the metrics server to work. 
var _ opmetrics.RegistryMetricer = (*Metrics)(nil) -func NewMetrics() *Metrics { +func NewMetrics(runConfigs []RunConfig) *Metrics { registry := opmetrics.NewRegistry() factory := opmetrics.With(registry) - return &Metrics{ + metrics := &Metrics{ ns: Namespace, registry: registry, factory: factory, @@ -69,6 +69,14 @@ func NewMetrics() *Metrics { Help: "Number of runs that determined the output root was invalid", }, []string{"type"}), } + + for _, runConfig := range runConfigs { + metrics.successTotal.WithLabelValues(runConfig.Name).Add(0) + metrics.failuresTotal.WithLabelValues(runConfig.Name).Add(0) + metrics.invalidTotal.WithLabelValues(runConfig.Name).Add(0) + } + + return metrics } func (m *Metrics) Registry() *prometheus.Registry { diff --git a/op-challenger/runner/prestates.go b/op-challenger/runner/prestates.go new file mode 100644 index 00000000000..ecbf19dbd89 --- /dev/null +++ b/op-challenger/runner/prestates.go @@ -0,0 +1,114 @@ +package runner + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "time" + + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts/metrics" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/prestates" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +type OnChainPrestateFetcher struct { + m metrics.ContractMetricer + gameFactoryAddress common.Address + gameType types.GameType + caller *batching.MultiCaller +} + +func (f *OnChainPrestateFetcher) getPrestate(ctx context.Context, logger log.Logger, prestateBaseUrl *url.URL, prestatePath string, dataDir string, stateConverter vm.StateConverter) (string, error) { + 
gameFactory := contracts.NewDisputeGameFactoryContract(f.m, f.gameFactoryAddress, f.caller) + gameImplAddr, err := gameFactory.GetGameImpl(ctx, f.gameType) + if err != nil { + return "", fmt.Errorf("failed to load game impl: %w", err) + } + if gameImplAddr == (common.Address{}) { + return "", nil // No prestate is set, will only work if a single prestate is specified + } + gameImpl, err := contracts.NewFaultDisputeGameContract(ctx, f.m, gameImplAddr, f.caller) + if err != nil { + return "", fmt.Errorf("failed to create fault dispute game contract bindings for %v: %w", gameImplAddr, err) + } + prestateHash, err := gameImpl.GetAbsolutePrestateHash(ctx) + if err != nil { + return "", fmt.Errorf("failed to get absolute prestate hash for %v: %w", gameImplAddr, err) + } + logger.Info("Using on-chain version of prestate", "prestate", prestateHash) + hashFetcher := &HashPrestateFetcher{prestateHash: prestateHash} + return hashFetcher.getPrestate(ctx, logger, prestateBaseUrl, prestatePath, dataDir, stateConverter) +} + +type HashPrestateFetcher struct { + prestateHash common.Hash +} + +func (f *HashPrestateFetcher) getPrestate(ctx context.Context, _ log.Logger, prestateBaseUrl *url.URL, prestatePath string, dataDir string, stateConverter vm.StateConverter) (string, error) { + prestateSource := prestates.NewPrestateSource( + prestateBaseUrl, + prestatePath, + filepath.Join(dataDir, "prestates"), + stateConverter) + + prestate, err := prestateSource.PrestatePath(ctx, f.prestateHash) + if err != nil { + return "", fmt.Errorf("failed to get prestate %v: %w", f.prestateHash, err) + } + return prestate, nil +} + +// NamedPrestateFetcher downloads a file with a specified name from the prestate base URL and uses it as the prestate. +// The file is re-downloaded on each run rather than being cached. This makes it possible to run the latest builds +// from develop. 
+type NamedPrestateFetcher struct { + filename string +} + +func (f *NamedPrestateFetcher) getPrestate(ctx context.Context, logger log.Logger, prestateBaseUrl *url.URL, _ string, dataDir string, stateConverter vm.StateConverter) (string, error) { + targetDir := filepath.Join(dataDir, "prestates") + if err := os.MkdirAll(targetDir, 0755); err != nil { + return "", fmt.Errorf("error creating prestate dir: %w", err) + } + prestateUrl := prestateBaseUrl.JoinPath(f.filename) + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "GET", prestateUrl.String(), nil) + if err != nil { + return "", fmt.Errorf("failed to create prestate request: %w", err) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return "", fmt.Errorf("failed to fetch prestate from %v: %w", prestateUrl, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("%w from url %v: status %v", prestates.ErrPrestateUnavailable, prestateUrl, resp.StatusCode) + } + + targetFile := filepath.Join(targetDir, f.filename) + out, err := os.Create(targetFile) + if err != nil { + return "", fmt.Errorf("failed to create prestate file %v: %w", targetFile, err) + } + defer out.Close() + if _, err := io.Copy(out, resp.Body); err != nil { + return "", fmt.Errorf("failed to write prestate to %v: %w", targetFile, err) + } + proof, _, _, err := stateConverter.ConvertStateToProof(ctx, targetFile) + if err != nil { + return "", fmt.Errorf("invalid prestate file %v: %w", f.filename, err) + } + logger.Info("Downloaded named prestate", "filename", f.filename, "prestate", proof.ClaimValue) + return targetFile, nil +} diff --git a/op-challenger/runner/runner.go b/op-challenger/runner/runner.go index 1d640fce0ff..b830c046e12 100644 --- a/op-challenger/runner/runner.go +++ b/op-challenger/runner/runner.go @@ -18,7 +18,6 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm" 
"github.com/ethereum-optimism/optimism/op-challenger/config" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" contractMetrics "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts/metrics" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" @@ -45,9 +44,10 @@ type Metricer interface { } type RunConfig struct { - TraceType types.TraceType - Name string - Prestate common.Hash + TraceType types.TraceType + Name string + Prestate common.Hash + PrestateFilename string } type Runner struct { @@ -68,7 +68,7 @@ func NewRunner(logger log.Logger, cfg *config.Config, runConfigs []RunConfig) *R log: logger, cfg: cfg, runConfigs: runConfigs, - m: NewMetrics(), + m: NewMetrics(runConfigs), } } @@ -131,17 +131,24 @@ func (r *Runner) runAndRecordOnce(ctx context.Context, runConfig RunConfig, clie } } - prestateHash := runConfig.Prestate - if prestateHash == (common.Hash{}) { - hash, err := r.getPrestateHash(ctx, runConfig.TraceType, caller) - if err != nil { - recordError(err, runConfig.Name, r.m, r.log) - return + var prestateSource prestateFetcher + if runConfig.PrestateFilename != "" { + r.log.Info("Using named prestate", "type", runConfig.TraceType, "filename", runConfig.PrestateFilename) + prestateSource = &NamedPrestateFetcher{filename: runConfig.PrestateFilename} + } else if runConfig.Prestate == (common.Hash{}) { + r.log.Info("Using on chain prestate", "type", runConfig.TraceType) + prestateSource = &OnChainPrestateFetcher{ + m: r.m, + gameFactoryAddress: r.cfg.GameFactoryAddress, + gameType: runConfig.TraceType.GameType(), + caller: caller, } - prestateHash = hash + } else { + r.log.Info("Using specific prestate", "type", runConfig.TraceType, "hash", runConfig.Prestate) + prestateSource = &HashPrestateFetcher{prestateHash: runConfig.Prestate} } - localInputs, err := r.createGameInputs(ctx, client) + localInputs, err := 
r.createGameInputs(ctx, client, runConfig.Name) if err != nil { recordError(err, runConfig.Name, r.m, r.log) return @@ -155,12 +162,12 @@ func (r *Runner) runAndRecordOnce(ctx context.Context, runConfig RunConfig, clie recordError(err, runConfig.Name, r.m, r.log) return } - err = r.runOnce(ctx, inputsLogger.With("type", runConfig.Name), runConfig.Name, runConfig.TraceType, prestateHash, localInputs, dir) + err = r.runOnce(ctx, inputsLogger.With("type", runConfig.Name), runConfig.Name, runConfig.TraceType, prestateSource, localInputs, dir) recordError(err, runConfig.Name, r.m, r.log) } -func (r *Runner) runOnce(ctx context.Context, logger log.Logger, name string, traceType types.TraceType, prestateHash common.Hash, localInputs utils.LocalGameInputs, dir string) error { - provider, err := createTraceProvider(ctx, logger, metrics.NewTypedVmMetrics(r.m, name), r.cfg, prestateHash, traceType, localInputs, dir) +func (r *Runner) runOnce(ctx context.Context, logger log.Logger, name string, traceType types.TraceType, prestateSource prestateFetcher, localInputs utils.LocalGameInputs, dir string) error { + provider, err := createTraceProvider(ctx, logger, metrics.NewTypedVmMetrics(r.m, name), r.cfg, prestateSource, traceType, localInputs, dir) if err != nil { return fmt.Errorf("failed to create trace provider: %w", err) } @@ -185,11 +192,12 @@ func (r *Runner) prepDatadir(name string) (string, error) { return dir, nil } -func (r *Runner) createGameInputs(ctx context.Context, client *sources.RollupClient) (utils.LocalGameInputs, error) { +func (r *Runner) createGameInputs(ctx context.Context, client *sources.RollupClient, traceType string) (utils.LocalGameInputs, error) { status, err := client.SyncStatus(ctx) if err != nil { return utils.LocalGameInputs{}, fmt.Errorf("failed to get rollup sync status: %w", err) } + r.log.Info("Got sync status", "status", status, "type", traceType) if status.FinalizedL2.Number == 0 { return utils.LocalGameInputs{}, errors.New("safe head is 0") 
@@ -199,7 +207,14 @@ func (r *Runner) createGameInputs(ctx context.Context, client *sources.RollupCli // Restrict the L1 head to a block that has actually been processed by op-node. // This only matters if op-node is behind and hasn't processed all finalized L1 blocks yet. l1Head = status.CurrentL1 + r.log.Info("Node has not completed syncing finalized L1 block, using CurrentL1 instead", "type", traceType) + } else if status.FinalizedL1.Number == 0 { + // The node is resetting its pipeline and has set FinalizedL1 to 0, use the current L1 instead as it is the best + // hope of getting a non-zero L1 block + l1Head = status.CurrentL1 + r.log.Warn("Node has zero finalized L1 block, using CurrentL1 instead", "type", traceType) } + r.log.Info("Using L1 head", "head", l1Head, "type", traceType) if l1Head.Number == 0 { return utils.LocalGameInputs{}, errors.New("l1 head is 0") } @@ -268,26 +283,6 @@ func (r *Runner) findL2BlockNumberToDispute(ctx context.Context, client *sources return l2BlockNum, nil } -func (r *Runner) getPrestateHash(ctx context.Context, traceType types.TraceType, caller *batching.MultiCaller) (common.Hash, error) { - gameFactory := contracts.NewDisputeGameFactoryContract(r.m, r.cfg.GameFactoryAddress, caller) - gameImplAddr, err := gameFactory.GetGameImpl(ctx, traceType.GameType()) - if err != nil { - return common.Hash{}, fmt.Errorf("failed to load game impl: %w", err) - } - if gameImplAddr == (common.Address{}) { - return common.Hash{}, nil // No prestate is set, will only work if a single prestate is specified - } - gameImpl, err := contracts.NewFaultDisputeGameContract(ctx, r.m, gameImplAddr, caller) - if err != nil { - return common.Hash{}, fmt.Errorf("failed to create fault dispute game contract bindings for %v: %w", gameImplAddr, err) - } - prestateHash, err := gameImpl.GetAbsolutePrestateHash(ctx) - if err != nil { - return common.Hash{}, fmt.Errorf("failed to get absolute prestate hash for %v: %w", gameImplAddr, err) - } - return prestateHash, 
err -} - func (r *Runner) Stop(ctx context.Context) error { r.log.Info("Stopping") if !r.running.CompareAndSwap(true, false) { diff --git a/op-deployer/book/.gitignore b/op-deployer/book/.gitignore new file mode 100644 index 00000000000..7585238efed --- /dev/null +++ b/op-deployer/book/.gitignore @@ -0,0 +1 @@ +book diff --git a/op-deployer/book/book.toml b/op-deployer/book/book.toml new file mode 100644 index 00000000000..f29e9795fd1 --- /dev/null +++ b/op-deployer/book/book.toml @@ -0,0 +1,11 @@ +[book] +authors = ["Optimism Contributors"] +language = "en" +multilingual = false +src = "src" +title = "OP Deployer Book" + +[output.html] +site-url = "/op-deployer/" +git-repository-url = "https://github.com/ethereum-optimism/optimism/tree/develop/op-deployer/book" +edit-url-template = "https://github.com/ethereum-optimism/optimism/tree/develop/op-deployer/book/{path}" \ No newline at end of file diff --git a/op-deployer/book/src/README.md b/op-deployer/book/src/README.md new file mode 100644 index 00000000000..b23006d65c7 --- /dev/null +++ b/op-deployer/book/src/README.md @@ -0,0 +1,33 @@ +# Introduction + +OP Deployer is a CLI tool that simplifies deploying and upgrading smart contracts for OP Stack chains. It also +exposes a suite of libraries that allow developers to easily manage smart contracts from their applications. + +## Goals + +### Declarative + +With OP Deployer, developers define their chain's desired configuration in a declarative configuration file. The tool +then makes the minimum number of smart contract calls required to make the deployment match the configuration. This +ensures that the implementation details of the deployment are abstracted away, and allows complex configurations to be +expressed cleanly without concern for the underlying deployment process. + +### Portable + +OP Deployer is designed to be small, portable, and easily installed. As such it is distributed as a standalone binary +with no additional dependencies. 
This allows it to be used in a variety of contexts, including as a CLI tool, in CI +pipelines, and as part of local development environments like [Kurtosis][kurtosis]. + +[kurtosis]: https://github.com/ethpandaops/optimism-package + +### Standard, But Extensible + +OP Deployer aims to make doing the right thing easy, and doing dangerous things hard. As such its configuration and +API are optimized for deploying and upgrading Standard OP Chains. However, it also exposes a lower-level set of +primitives and configuration directives which users can use to deploy more complex configurations if the need arises. + +## Development Status + +OP Deployer is undergoing active development and has been used for several mainnet deployments. It is considered +production-ready. However, please keep in mind that **OP Deployer has not been audited** and that any chains +deployed using OP Deployer should be checked thoroughly for correctness prior to launch. \ No newline at end of file diff --git a/op-deployer/book/src/SUMMARY.md b/op-deployer/book/src/SUMMARY.md new file mode 100644 index 00000000000..5fb3cbf9569 --- /dev/null +++ b/op-deployer/book/src/SUMMARY.md @@ -0,0 +1,17 @@ +# Summary + +[Introduction](README.md) + +# User Guide + +- [Installation](user-guide/installation.md) +- [Usage](user-guide/usage.md) + - [init](user-guide/init.md) + - [apply](user-guide/apply.md) + +# Reference Guide + +- [Architecture](reference-guide/architecture.md) + - [Deployment Pipeline](reference-guide/pipeline.md) + - [Scripting Engine](reference-guide/engine.md) +- [Artifacts Locators](reference-guide/artifacts-locators.md) diff --git a/op-deployer/book/src/reference-guide/architecture.md b/op-deployer/book/src/reference-guide/architecture.md new file mode 100644 index 00000000000..f7dca17f9b6 --- /dev/null +++ b/op-deployer/book/src/reference-guide/architecture.md @@ -0,0 +1,8 @@ +# Architecture + +This section details OP Deployer's architecture and internals. 
Unless you're contributing directly to OP Deployer, +you don't need to read this. + +- [Deployment Pipeline](./pipeline.md): Describes the stages of the deployment pipeline. +- [Scripting Engine](./engine.md): Describes the scripting engine that OP Deployer uses to interact + with the EVM. \ No newline at end of file diff --git a/op-deployer/book/src/reference-guide/artifacts-locators.md b/op-deployer/book/src/reference-guide/artifacts-locators.md new file mode 100644 index 00000000000..729cdc8bba1 --- /dev/null +++ b/op-deployer/book/src/reference-guide/artifacts-locators.md @@ -0,0 +1,32 @@ +# Artifacts Locators + +OP Deployer calls into precompiled contract artifacts. To make this work, OP Deployer uses artifacts locators to +point to the location of contract artifacts. While locators are nothing more than URLs, they do encode some +additional behaviors which are described here. + +## Locator Types + +Locators can be one of three types: + +- `tag://` locators, which point to a versioned contracts release. These resolve to a known URL. Artifacts + downloaded using a tagged locator are validated against a hardcoded checksum in the OP Deployer implementation. + This prevents tampering with the contract artifacts once they have been tagged. Additionally, tagged locators are + cached on disk to avoid repeated downloads. +- `https://` locators, which point to a tarball of contract artifacts somewhere on the web. HTTP locators are cached + just like tagged locators are, but they are not validated against a checksum. +- `file://` locators, which point to a directory on disk containing the artifacts. + +## Version Hints + +OP Deployer supports multiple different contract versions at the same time. Sometimes, contracts at version X are +backwards-incompatible with version Y. OP Deployer will support both versions at the same time when this happens. +However, OP Deployer needs to know which versioning behavior to use with each locator. 
For `tag` locators this is +easy since the behavior is encoded in the tag itself. However, it's more complicated for `https` and `file` locators. + +To support multiple versions of each contract, OP Deployer supports specifying _version hints_ in the locator. These +hints are URL fragments (e.g., the part of the URL that comes after the `#` symbol) denoting how OP Deployer should +treat the artifacts at that URL. For example, the URL `https://example.com/artifacts.tar.gz#v1` would treat the +artifacts at the URL with the versioning behavior of version `v1`. + +This only applies to `https` and `file` locators. `tag` locators are versioned by the tag itself, and any hints will +be ignored. \ No newline at end of file diff --git a/op-deployer/book/src/reference-guide/engine.md b/op-deployer/book/src/reference-guide/engine.md new file mode 100644 index 00000000000..ef68530fbe0 --- /dev/null +++ b/op-deployer/book/src/reference-guide/engine.md @@ -0,0 +1,125 @@ +# Scripting Engine + +One of OP Deployer's most powerful features is its in-memory EVM scripting engine. The scripting engine provides +similar capabilities to Forge: + +- It runs all on-chain calls in a simulated environment first, which allows the effects of on-chain calls to be + validated before they cost gas. +- It exposes Foundry cheatcodes, which allow for deep instrumentation and customization of the EVM environment. These + cheatcodes in turn allow OP Deployer to call into Solidity scripts. + +The scripting engine is really the heart of OP Deployer. Without it, OP Deployer would be nothing more than a thin +wrapper over Forge. The scripting engine enables: + +- Easy integration with existing Solidity-based tooling. +- Detailed stack traces when deployments fail. +- Fast feedback loops that prevent sending on-chain transactions that may fail. +- Live chain forking. + +For these reasons and more, the scripting engine is a critical part of OP Deployer's architecture. 
You will see that +almost all on-chain interactions initiated by OP Deployer use the scripting engine to call into a Solidity script. +The script then uses `vm.broadcast` to signal a transaction that should be sent on-chain. + +### Aside: Why Use Solidity Scripts? + +Solidity scripts are much more ergonomic than Go code for complex on-chain interactions. They allow for: + +- Easy integration with existing Solidity-based tooling and libraries. +- Simple ABI encoding/decoding. +- Clear separation of concerns between inter-contract calls, and the underlying RPC calls that drive them. + +The alternative is to encode all on-chain interactions in Go code. This is possible, but it is much more verbose and +requires writing bindings between Go and the Solidity ABI. These bindings are error-prone and difficult to maintain. + +## Engine Implementation + +The scripting engine is implemented in the `op-chain-ops/script` package. It extends Geth's EVM implementation with +Forge cheatcodes, and defines some tools that allow Go structs to be etched into the EVM's memory. Geth exposes +hooks that drive most of the engine's behavior. The best way to understand these further is to read the code. + +## Using the Engine + +OP Deployer uses the etching tooling described above to communicate between OP Deployer and the scripting engine. +Most Solidity scripts define an input contract, an output contract, and the script itself. The script reads data +from fields on the input contract, then sets fields on the output contract as it runs. OP Deployer defines the input +and output contracts as Go structs, like this: + +```go +package foo_script + +type FooInput struct { + Number uint64 + Bytes []byte +} + +type FooOutput struct { + Result uint64 + Bytes []byte +} +``` + +The input and output contracts are then "etched" into the EVM's memory, like this: + +```go +package foo_script + +// ... 
struct defs elided + +func Run(host *script.Host, input FooInput) (FooOutput, error) { + // Create a variable to hold our output + var output FooOutput + + // Make new addresses for our input/output contracts + inputAddr := host.NewScriptAddress() + outputAddr := host.NewScriptAddress() + + // Inject the input/output contracts into the EVM as precompiles + cleanupInput, err := script.WithPrecompileAtAddress[*FooInput](host, inputAddr, &input) + if err != nil { + return output, fmt.Errorf("failed to insert input precompile: %w", err) + } + defer cleanupInput() + + cleanupOutput, err := script.WithPrecompileAtAddress[*FooOutput](host, outputAddr, &output, + script.WithFieldSetter[*FooOutput]) + if err != nil { + return output, fmt.Errorf("failed to insert output precompile: %w", err) + } + defer cleanupOutput() + + // ... do stuff with the input/output contracts ... +} +``` + +The script engine will automatically generate getters and setters for the fields on the input and output contracts. +You can use the `evm:` struct tag to customize the behavior of these getters and setters. + +Finally, the script itself gets etched into the EVM's memory and executed, like this: + +```go +package foo_script + +type FooScript struct { + Run func(input, output common.Address) error +} + +func Run(host *script.Host, input FooInput) (FooOutput, error) { + // .. see implementation above... + + deployScript, cleanupDeploy, err := script.WithScript[FooScript](host, "FooScript.s.sol", "FooScript") + if err != nil { + return output, fmt.Errorf("failed to load %s script: %w", scriptFile, err) + } + defer cleanupDeploy() + + if err := deployScript.Run(inputAddr, outputAddr); err != nil { + return output, fmt.Errorf("failed to run %s script: %w", scriptFile, err) + } + + return output, nil +} +``` + +You may notice that the script is loaded from a file. To run the scripting engine, contract artifacts (**not** +source code) must exist somewhere on disk for the scripting engine to use. 
For more information on that, see the +chapter on artifacts locators. \ No newline at end of file diff --git a/op-deployer/book/src/reference-guide/pipeline.md b/op-deployer/book/src/reference-guide/pipeline.md new file mode 100644 index 00000000000..dbe6b18a5d2 --- /dev/null +++ b/op-deployer/book/src/reference-guide/pipeline.md @@ -0,0 +1,54 @@ +# Deployment Pipeline + +OP Deployer is architected as a pipeline where each stage is responsible for a single piece of the deployment process. +The pipeline consumes a configuration called an *intent* which describes the desired state of the chain, and +produces a file called the *state* which describes the current state of the chain during and after the deployment. +The steps of the pipeline are: + +1. Initialization +2. Superchain Deployment +3. Implementations Deployment +4. OP Chain Deployment +5. Alt-DA Deployment +6. Dispute Game Deployment +7. L2 Genesis Generation +8. Setting Start Block + +State is written to disk after each stage. This allows the pipeline to be restarted from any point in the event of a +recoverable error. + +We'll cover each of these stages in more detail below. + +### Initialization + +During this step, OP Deployer sets initial values for the pipeline based on the user's intent. These values will be +used by downstream stages. For example, if the user is deploying using an existing set of Superchain contracts, +those contracts will be inserted into the state during this step. + +### Superchain/Implementations Deployment + +Next, the base contracts for the chain are deployed. This includes Superchain-wide contracts like the +`SuperchainConfig` and `ProtocolVersions`, as well as implementation contracts that will be used for the OP Chain +deployment in the future like the OP Contracts Manager (OPCM). + +Most chains will be configured to use existing implementations. In this case, these steps will be skipped. + +### OP Chain Deployment + +The OP Chain itself is deployed during this step.
Multiple chains will be deployed if they are specified in the +intent. The deployment works by calling into the OPCM, which will emit an event for each successfully-deployed chain. + +### Customizations Deployment + +The next two steps (Alt-DA and Dispute Game) deploy customizations. As their names imply, they deploy Alt-DA and +additional dispute game contracts. Typically, these steps will be skipped as they are mostly useful in testing. + +### L2 Genesis Generation + +This step generates the L2 Genesis file which is used to initialize the chain. This file is generated by calling +into `L2Genesis.sol`, and dumping the outputted state. + +### Setting Start Block + +Lastly, the start block is set to the current block number on L1. This is done last to ensure that the start block +is relatively recent, since the deployment process can take arbitrarily long. \ No newline at end of file diff --git a/op-deployer/book/src/user-guide/apply.md b/op-deployer/book/src/user-guide/apply.md new file mode 100644 index 00000000000..e4b013ec0bb --- /dev/null +++ b/op-deployer/book/src/user-guide/apply.md @@ -0,0 +1,42 @@ +# The Apply Command + +Once you have [initialized][init] your intent and state files, you can use the `apply` command to perform the +deployment. + +[init]: init.md + +You can call the `apply` command like this: + +```shell + +op-deployer apply \ + --workdir \ + <... additional arguments ...> +``` + +You will need to specify additional arguments depending on what you're trying to do. See below for a reference of each +supported CLI arg. + +### `--deployment-target` + +**Default:** `live` + +`--deployment-target` specifies where each chain should be deployed to. It can be one of the following values: + +- `live`: Deploys to a live L1. Concretely, this means that OP Deployer will send transactions identified by + `vm.broadcast` calls to L1. `--l1-rpc-url` and `--private-key` must be specified when using this target. +- `genesis`: Deploys to an L1 genesis file. 
This is useful for testing or local development purposes. You do not need to + specify any additional arguments when using this target. +- `calldata`: Deploys to a calldata file. This is useful for generating inputs to multisig wallets for future execution. +- `noop`: Doesn't deploy anything. This is useful for performing a dry-run of the deployment process prior to another + deployment target. + +### `--l1-rpc-url` + +Defines the RPC URL of the L1 chain to deploy to. + +### `--private-key` + +Defines the private key to use for signing transactions. This is only required for deployment targets that involve +sending live transactions. Note that ownership over each L2 is transferred to the proxy admin owner specified in the +intent after the deployment completes, so it's OK to use a hot key for this purpose. \ No newline at end of file diff --git a/op-deployer/book/src/user-guide/init.md b/op-deployer/book/src/user-guide/init.md new file mode 100644 index 00000000000..9f272e24b02 --- /dev/null +++ b/op-deployer/book/src/user-guide/init.md @@ -0,0 +1,63 @@ +# The Init Command + +The `init` command is used to create a new intent and state file in the specified directory. This command is the +starting point of each new deployment. + +The `init` command is used like this: + +```shell +op-deployer init \ + --l1-chain-id \ + --l2-chain-ids \ + --output-dir \ + --intent-config-type +``` + +You should then see the following files appear in your output directory: + +``` +outdir +├── intent.toml +└── state.json +``` + +The `intent.toml` file is where you specify the configuration for your deployment. The `state.json` file is where OP +Deployer will output the current state of the deployment after each [stage][stages] of the deployment. 
+ +Your intent should look something like this: + +```toml +configType = "standard" +l1ChainID = 11155420 +fundDevAccounts = false +useInterop = false +l1ContractsLocator = "tag://op-contracts/v1.8.0-rc.4" +l2ContractsLocator = "op-contracts/v1.7.0-beta.1+l2-contracts" + +[superchainRoles] + proxyAdminOwner = "0xeAAA3fd0358F476c86C26AE77B7b89a069730570" + protocolVersionsOwner = "0xeAAA3fd0358F476c86C26AE77B7b89a069730570" + guardian = "0xeAAA3fd0358F476c86C26AE77B7b89a069730570" + +[[chains]] + id = "0x0000000000000000000000000000000000000000000000000000000000002390" + baseFeeVaultRecipient = "0x0000000000000000000000000000000000000000" + l1FeeVaultRecipient = "0x0000000000000000000000000000000000000000" + sequencerFeeVaultRecipient = "0x0000000000000000000000000000000000000000" + eip1559DenominatorCanyon = 250 + eip1559Denominator = 50 + eip1559Elasticity = 6 + [chains.roles] + l1ProxyAdminOwner = "0x0000000000000000000000000000000000000000" + l2ProxyAdminOwner = "0x0000000000000000000000000000000000000000" + systemConfigOwner = "0x0000000000000000000000000000000000000000" + unsafeBlockSigner = "0x0000000000000000000000000000000000000000" + batcher = "0x0000000000000000000000000000000000000000" + proposer = "0x0000000000000000000000000000000000000000" + challenger = "0x0000000000000000000000000000000000000000" +``` + +Before you can use your intent file for a deployment, you will need to update all zero values to whatever is +appropriate for your chain. + +[stages]: ../reference-guide/pipeline.md diff --git a/op-deployer/book/src/user-guide/installation.md b/op-deployer/book/src/user-guide/installation.md new file mode 100644 index 00000000000..ad6b54cbf52 --- /dev/null +++ b/op-deployer/book/src/user-guide/installation.md @@ -0,0 +1,23 @@ +# Installation + +OP Deployer can be installed both from pre-built binaries and from source. This guide will walk you through both +methods.
+ +## Install From Binaries + +Installing OP Deployer from pre-built binaries is the easiest and most preferred way to get started. To install from +binaries, download the latest release from the [releases page][releases] and extract the binary to a directory in your +`$PATH`. + +[releases]: https://github.com/ethereum-optimism/optimism/releases?q=op-deployer&expanded=true + +## Install From Source + +To install from source, you will need Go, `just`, and `git`. Then, run the following: + +```shell +git clone git@github.com:ethereum-optimism/optimism.git # you can skip this if you already have the repo +cd optimism/op-deployer +just build +cp ./bin/op-deployer /usr/local/bin/op-deployer # or any other directory in your $PATH +``` \ No newline at end of file diff --git a/op-deployer/book/src/user-guide/usage.md b/op-deployer/book/src/user-guide/usage.md new file mode 100644 index 00000000000..03a5b7073e1 --- /dev/null +++ b/op-deployer/book/src/user-guide/usage.md @@ -0,0 +1,15 @@ +# Usage + +The OP Deployer CLI tool is used to deploy and manage your smart contracts. After [installing][installation] OP +Deployer, you can use `op-deployer help` to view the available commands. + +[installation]: installation.md + +The following sections provide in-depth information on the different commands available. + +- [`op-deployer init`][init]: Initializes a new intent and state file. +- [`op-deployer apply`][apply]: Deploys a new OP Chain based on the supplied intent. +- `op-deployer bootstrap`: Deploys shared contract instances for use with future invocations of OP Deployer.
+ +[init]: init.md +[apply]: apply.md \ No newline at end of file diff --git a/op-deployer/cmd/op-deployer/main.go b/op-deployer/cmd/op-deployer/main.go index c8f3d26ce63..504dd4d04a2 100644 --- a/op-deployer/cmd/op-deployer/main.go +++ b/op-deployer/cmd/op-deployer/main.go @@ -5,6 +5,7 @@ import ( "os" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/manage" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/upgrade" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/bootstrap" @@ -44,6 +45,12 @@ func main() { Flags: cliapp.ProtectFlags(deployer.ApplyFlags), Action: deployer.ApplyCLI(), }, + { + Name: "upgrade", + Usage: "upgrades contracts by sending tx to OPCM.upgrade function", + Flags: cliapp.ProtectFlags(deployer.UpgradeFlags), + Subcommands: upgrade.Commands, + }, { Name: "bootstrap", Usage: "bootstraps global contract instances", diff --git a/op-deployer/pkg/deployer/apply.go b/op-deployer/pkg/deployer/apply.go index 828b10b36bf..fb93daa3c28 100644 --- a/op-deployer/pkg/deployer/apply.go +++ b/op-deployer/pkg/deployer/apply.go @@ -152,33 +152,19 @@ func ApplyPipeline( } st := opts.State - progressor := func(curr, total int64) { - opts.Logger.Info("artifacts download progress", "current", curr, "total", total) - } - - l1ArtifactsFS, cleanupL1, err := artifacts.Download(ctx, intent.L1ContractsLocator, progressor) + l1ArtifactsFS, err := artifacts.Download(ctx, intent.L1ContractsLocator, artifacts.BarProgressor()) if err != nil { return fmt.Errorf("failed to download L1 artifacts: %w", err) } - defer func() { - if err := cleanupL1(); err != nil { - opts.Logger.Warn("failed to clean up L1 artifacts", "err", err) - } - }() var l2ArtifactsFS foundry.StatDirFs if intent.L1ContractsLocator.Equal(intent.L2ContractsLocator) { l2ArtifactsFS = l1ArtifactsFS } else { - l2Afs, cleanupL2, err := artifacts.Download(ctx, intent.L2ContractsLocator, progressor) + l2Afs, 
err := artifacts.Download(ctx, intent.L2ContractsLocator, artifacts.BarProgressor()) if err != nil { return fmt.Errorf("failed to download L2 artifacts: %w", err) } - defer func() { - if err := cleanupL2(); err != nil { - opts.Logger.Warn("failed to clean up L2 artifacts", "err", err) - } - }() l2ArtifactsFS = l2Afs } diff --git a/op-deployer/pkg/deployer/artifacts/download.go b/op-deployer/pkg/deployer/artifacts/download.go new file mode 100644 index 00000000000..0e7b90d058c --- /dev/null +++ b/op-deployer/pkg/deployer/artifacts/download.go @@ -0,0 +1,221 @@ +package artifacts + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/gzip" + "context" + "crypto/sha256" + "errors" + "fmt" + "io" + "io/fs" + "net/http" + "net/url" + "os" + "path" + "strings" + "sync" + + "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" +) + +var ErrUnsupportedArtifactsScheme = errors.New("unsupported artifacts URL scheme") + +type Downloader interface { + Download(ctx context.Context, url string, progress DownloadProgressor) (string, error) +} + +type Extractor interface { + Extract(src string, dest string) (string, error) +} + +func Download(ctx context.Context, loc *Locator, progressor DownloadProgressor) (foundry.StatDirFs, error) { + if progressor == nil { + progressor = NoopProgressor() + } + + var u *url.URL + var err error + var checker integrityChecker + if loc.IsTag() { + u, err = standard.ArtifactsURLForTag(loc.Tag) + if err != nil { + return nil, fmt.Errorf("failed to get standard artifacts URL for tag %s: %w", loc.Tag, err) + } + + hash, err := standard.ArtifactsHashForTag(loc.Tag) + if err != nil { + return nil, fmt.Errorf("failed to get standard artifacts hash for tag %s: %w", loc.Tag, err) + } + + checker = &hashIntegrityChecker{hash: hash} + } else { + u = loc.URL + checker = new(noopIntegrityChecker) + } + + var artifactsFS fs.FS + switch u.Scheme { + case "http", "https": + 
artifactsFS, err = downloadHTTP(ctx, u, progressor, checker) + if err != nil { + return nil, fmt.Errorf("failed to download artifacts: %w", err) + } + case "file": + artifactsFS = os.DirFS(u.Path) + default: + return nil, ErrUnsupportedArtifactsScheme + } + return artifactsFS.(foundry.StatDirFs), nil +} + +func downloadHTTP(ctx context.Context, u *url.URL, progressor DownloadProgressor, checker integrityChecker) (fs.FS, error) { + cacher := &CachingDownloader{ + d: new(HTTPDownloader), + } + + tarballPath, err := cacher.Download(ctx, u.String(), progressor) + if err != nil { + return nil, fmt.Errorf("failed to download artifacts: %w", err) + } + tmpDir, err := os.MkdirTemp("", "op-deployer-artifacts-*") + if err != nil { + return nil, fmt.Errorf("failed to create temp dir: %w", err) + } + extractor := &TarballExtractor{ + checker: checker, + } + if err := extractor.Extract(tarballPath, tmpDir); err != nil { + return nil, fmt.Errorf("failed to extract tarball: %w", err) + } + return os.DirFS(path.Join(tmpDir, "forge-artifacts")), nil +} + +type HTTPDownloader struct{} + +func (d *HTTPDownloader) Download(ctx context.Context, url string, progress DownloadProgressor) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", fmt.Errorf("failed to create request: %w", err) + } + + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", fmt.Errorf("failed to download artifacts: %w", err) + } + if res.StatusCode != http.StatusOK { + return "", fmt.Errorf("failed to download artifacts: invalid status code %s", res.Status) + } + defer res.Body.Close() + + tmpFile, err := os.CreateTemp("", "op-deployer-artifacts-*") + if err != nil { + return "", fmt.Errorf("failed to create temporary file: %w", err) + } + + pr := &progressReader{ + r: res.Body, + progress: progress, + total: res.ContentLength, + } + if _, err := io.Copy(tmpFile, pr); err != nil { + return "", fmt.Errorf("failed to write to 
temporary file: %w", err) + } + + return tmpFile.Name(), nil +} + +type CachingDownloader struct { + d Downloader + mtx sync.Mutex +} + +func (d *CachingDownloader) Download(ctx context.Context, url string, progress DownloadProgressor) (string, error) { + d.mtx.Lock() + defer d.mtx.Unlock() + + cachePath := fmt.Sprintf("/tmp/op-deployer-cache/%x.tgz", sha256.Sum256([]byte(url))) + if _, err := os.Stat(cachePath); err == nil { + return cachePath, nil + } + tmpPath, err := d.d.Download(ctx, url, progress) + if err != nil { + return "", fmt.Errorf("failed to download: %w", err) + } + if err := os.MkdirAll("/tmp/op-deployer-cache", 0755); err != nil { + return "", fmt.Errorf("failed to create cache directory: %w", err) + } + if err := os.Rename(tmpPath, cachePath); err != nil { + return "", fmt.Errorf("failed to move downloaded file to cache: %w", err) + } + return cachePath, nil +} + +type TarballExtractor struct { + checker integrityChecker +} + +func (e *TarballExtractor) Extract(src string, dest string) error { + data, err := os.ReadFile(src) + if err != nil { + return fmt.Errorf("failed to read tarball: %w", err) + } + + if err := e.checker.CheckIntegrity(data); err != nil { + return fmt.Errorf("integrity check failed: %w", err) + } + + gzr, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return fmt.Errorf("failed to create gzip reader: %w", err) + } + defer gzr.Close() + + tr := tar.NewReader(gzr) + if err := untar(dest, tr); err != nil { + return fmt.Errorf("failed to untar: %w", err) + } + + return nil +} + +func untar(dir string, tr *tar.Reader) error { + for { + hdr, err := tr.Next() + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("failed to read tar header: %w", err) + } + + cleanedName := path.Clean(hdr.Name) + if strings.Contains(cleanedName, "..") { + return fmt.Errorf("invalid file path: %s", hdr.Name) + } + dst := path.Join(dir, cleanedName) + if hdr.FileInfo().IsDir() { + if err := os.MkdirAll(dst, 0o755); 
err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + continue + } + + f, err := os.Create(dst) + buf := bufio.NewWriter(f) + if err != nil { + return fmt.Errorf("failed to create file: %w", err) + } + if _, err := io.Copy(buf, tr); err != nil { + _ = f.Close() + return fmt.Errorf("failed to write file: %w", err) + } + if err := buf.Flush(); err != nil { + return fmt.Errorf("failed to flush buffer: %w", err) + } + _ = f.Close() + } +} diff --git a/op-deployer/pkg/deployer/artifacts/download_test.go b/op-deployer/pkg/deployer/artifacts/download_test.go new file mode 100644 index 00000000000..24dcaee09b5 --- /dev/null +++ b/op-deployer/pkg/deployer/artifacts/download_test.go @@ -0,0 +1,133 @@ +package artifacts + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + "os" + "sync/atomic" + "testing" + "time" + + "github.com/minio/sha256-simd" + + "github.com/ethereum/go-ethereum/common" + + "github.com/stretchr/testify/require" +) + +func TestDownloadArtifacts_MockArtifacts(t *testing.T) { + f, err := os.OpenFile("testdata/artifacts.tar.gz", os.O_RDONLY, 0o644) + require.NoError(t, err) + defer f.Close() + + var callCount int32 + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, err := io.Copy(w, f) + require.NoError(t, err) + // Seek to beginning of file for next request + _, err = f.Seek(0, 0) + require.NoError(t, err) + atomic.AddInt32(&callCount, 1) + })) + defer ts.Close() + + ctx := context.Background() + artifactsURL, err := url.Parse(ts.URL) + require.NoError(t, err) + loc := &Locator{ + URL: artifactsURL, + } + + t.Run("success", func(t *testing.T) { + fs, err := Download(ctx, loc, nil) + require.NoError(t, err) + require.NotNil(t, fs) + + info, err := fs.Stat("WETH98.sol/WETH98.json") + require.NoError(t, err) + require.Greater(t, info.Size(), int64(0)) + }) + + t.Run("bad integrity", func(t *testing.T) { + _, err := 
downloadHTTP(ctx, loc.URL, nil, &hashIntegrityChecker{ + hash: common.Hash{'B', 'A', 'D'}, + }) + require.Error(t, err) + require.ErrorContains(t, err, "integrity check failed") + }) + + correctIntegrity := &hashIntegrityChecker{ + hash: common.HexToHash("0x0f814df0c4293aaaadd468ac37e6c92f0b40fd21df848076835cb2c21d2a516f"), + } + + t.Run("ok integrity", func(t *testing.T) { + _, err := downloadHTTP(ctx, loc.URL, nil, correctIntegrity) + require.NoError(t, err) + }) + + t.Run("caching works", func(t *testing.T) { + u, err := url.Parse(loc.URL.String()) + require.NoError(t, err) + u.Path = fmt.Sprintf("/different-path-%d", time.Now().UnixNano()) + + startCalls := atomic.LoadInt32(&callCount) + _, err = downloadHTTP(ctx, u, nil, correctIntegrity) + require.NoError(t, err) + startCalls++ + require.Equal(t, startCalls, atomic.LoadInt32(&callCount)) + + t.Cleanup(func() { + require.NoError(t, os.Remove( + fmt.Sprintf("/tmp/op-deployer-cache/%x.tgz", sha256.Sum256([]byte(u.String()))), + )) + }) + + _, err = downloadHTTP(ctx, u, nil, correctIntegrity) + require.NoError(t, err) + require.Equal(t, startCalls, atomic.LoadInt32(&callCount)) + }) + + t.Run("caching validates integrity", func(t *testing.T) { + u, err := url.Parse(loc.URL.String()) + require.NoError(t, err) + u.Path = fmt.Sprintf("/different-path-%d", time.Now().UnixNano()) + + _, err = downloadHTTP(ctx, u, nil, correctIntegrity) + require.NoError(t, err) + + cacheFile := fmt.Sprintf("/tmp/op-deployer-cache/%x.tgz", sha256.Sum256([]byte(u.String()))) + t.Cleanup(func() { + require.NoError(t, os.Remove(cacheFile)) + }) + + cacheF, err := os.OpenFile(cacheFile, os.O_RDWR, 0o644) + require.NoError(t, err) + _, err = cacheF.Write([]byte("bad data")) + require.NoError(t, err) + require.NoError(t, cacheF.Close()) + + _, err = downloadHTTP(ctx, u, nil, correctIntegrity) + require.ErrorContains(t, err, "integrity check failed") + }) +} + +func TestDownloadArtifacts_TaggedVersions(t *testing.T) { + tags := []string{ + 
"op-contracts/v1.6.0", + "op-contracts/v1.7.0-beta.1+l2-contracts", + } + for _, tag := range tags { + t.Run(tag, func(t *testing.T) { + t.Parallel() + loc := MustNewLocatorFromTag(tag) + _, err := Download(context.Background(), loc, nil) + require.NoError(t, err) + }) + } +} diff --git a/op-deployer/pkg/deployer/artifacts/downloader.go b/op-deployer/pkg/deployer/artifacts/downloader.go deleted file mode 100644 index 7e566952e09..00000000000 --- a/op-deployer/pkg/deployer/artifacts/downloader.go +++ /dev/null @@ -1,207 +0,0 @@ -package artifacts - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/gzip" - "context" - "crypto/sha256" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "path" - "strings" - "time" - - "github.com/ethereum/go-ethereum/common" - - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" - - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" -) - -var ErrUnsupportedArtifactsScheme = errors.New("unsupported artifacts URL scheme") - -type DownloadProgressor func(current, total int64) - -func NoopDownloadProgressor(current, total int64) {} - -type CleanupFunc func() error - -var noopCleanup = func() error { return nil } - -func LogProgressor(lgr log.Logger) DownloadProgressor { - return func(curr, total int64) { - lgr.Info("artifacts download progress", "current", curr, "total", total) - } -} - -func Download(ctx context.Context, loc *Locator, progress DownloadProgressor) (foundry.StatDirFs, CleanupFunc, error) { - var u *url.URL - var err error - var checker integrityChecker - if loc.IsTag() { - u, err = standard.ArtifactsURLForTag(loc.Tag) - if err != nil { - return nil, nil, fmt.Errorf("failed to get standard artifacts URL for tag %s: %w", loc.Tag, err) - } - - hash, err := standard.ArtifactsHashForTag(loc.Tag) - if err != nil { - return nil, nil, fmt.Errorf("failed to get standard artifacts hash for tag %s: %w", loc.Tag, err) - } - - checker = 
&hashIntegrityChecker{hash: hash} - } else { - u = loc.URL - checker = &noopIntegrityChecker{} - } - - return downloadURL(ctx, u, progress, checker) -} - -type integrityChecker interface { - CheckIntegrity(data []byte) error -} - -type hashIntegrityChecker struct { - hash common.Hash -} - -func (h *hashIntegrityChecker) CheckIntegrity(data []byte) error { - hash := sha256.Sum256(data) - if hash != h.hash { - return fmt.Errorf("integrity check failed - expected: %x, got: %x", h.hash, hash) - } - return nil -} - -type noopIntegrityChecker struct{} - -func (noopIntegrityChecker) CheckIntegrity(data []byte) error { - return nil -} - -func downloadURL(ctx context.Context, u *url.URL, progress DownloadProgressor, checker integrityChecker) (foundry.StatDirFs, CleanupFunc, error) { - switch u.Scheme { - case "http", "https": - req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) - if err != nil { - return nil, nil, fmt.Errorf("failed to create request: %w", err) - } - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, nil, fmt.Errorf("failed to download artifacts: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return nil, nil, fmt.Errorf("failed to download artifacts: invalid status code %s", resp.Status) - } - - tmpDir, err := os.MkdirTemp("", "op-deployer-artifacts-*") - if err != nil { - return nil, nil, fmt.Errorf("failed to create temp dir: %w", err) - } - - pr := &progressReader{ - r: resp.Body, - progress: progress, - total: resp.ContentLength, - } - - data, err := io.ReadAll(pr) - if err != nil { - return nil, nil, fmt.Errorf("failed to read response body: %w", err) - } - - if err := checker.CheckIntegrity(data); err != nil { - return nil, nil, fmt.Errorf("failed to check integrity: %w", err) - } - - gr, err := gzip.NewReader(bytes.NewReader(data)) - if err != nil { - return nil, nil, fmt.Errorf("failed to create gzip reader: %w", err) - } - defer gr.Close() - - tr := 
tar.NewReader(gr) - if err := untar(tmpDir, tr); err != nil { - return nil, nil, fmt.Errorf("failed to untar: %w", err) - } - - fs := os.DirFS(path.Join(tmpDir, "forge-artifacts")) - cleanup := func() error { - return os.RemoveAll(tmpDir) - } - return fs.(foundry.StatDirFs), cleanup, nil - case "file": - fs := os.DirFS(u.Path) - return fs.(foundry.StatDirFs), noopCleanup, nil - default: - return nil, nil, ErrUnsupportedArtifactsScheme - } -} - -type progressReader struct { - r io.Reader - progress DownloadProgressor - curr int64 - total int64 - lastPrint time.Time -} - -func (pr *progressReader) Read(p []byte) (int, error) { - n, err := pr.r.Read(p) - pr.curr += int64(n) - if pr.progress != nil && time.Since(pr.lastPrint) > 1*time.Second { - pr.progress(pr.curr, pr.total) - pr.lastPrint = time.Now() - } - return n, err -} - -func untar(dir string, tr *tar.Reader) error { - for { - hdr, err := tr.Next() - if err == io.EOF { - return nil - } - if err != nil { - return fmt.Errorf("failed to read tar header: %w", err) - } - - cleanedName := path.Clean(hdr.Name) - if strings.Contains(cleanedName, "..") { - return fmt.Errorf("invalid file path: %s", hdr.Name) - } - dst := path.Join(dir, cleanedName) - if hdr.FileInfo().IsDir() { - if err := os.MkdirAll(dst, 0o755); err != nil { - return fmt.Errorf("failed to create directory: %w", err) - } - continue - } - - f, err := os.Create(dst) - buf := bufio.NewWriter(f) - if err != nil { - return fmt.Errorf("failed to create file: %w", err) - } - if _, err := io.Copy(buf, tr); err != nil { - _ = f.Close() - return fmt.Errorf("failed to write file: %w", err) - } - if err := buf.Flush(); err != nil { - return fmt.Errorf("failed to flush buffer: %w", err) - } - _ = f.Close() - } -} diff --git a/op-deployer/pkg/deployer/artifacts/downloader_test.go b/op-deployer/pkg/deployer/artifacts/downloader_test.go deleted file mode 100644 index cf4ef4742c9..00000000000 --- a/op-deployer/pkg/deployer/artifacts/downloader_test.go +++ /dev/null @@ 
-1,85 +0,0 @@ -package artifacts - -import ( - "context" - "io" - "net/http" - "net/http/httptest" - "net/url" - "os" - "testing" - - "github.com/ethereum/go-ethereum/common" - - "github.com/stretchr/testify/require" -) - -func TestDownloadArtifacts_MockArtifacts(t *testing.T) { - f, err := os.OpenFile("testdata/artifacts.tar.gz", os.O_RDONLY, 0o644) - require.NoError(t, err) - defer f.Close() - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - _, err := io.Copy(w, f) - require.NoError(t, err) - // Seek to beginning of file for next request - _, err = f.Seek(0, 0) - require.NoError(t, err) - })) - defer ts.Close() - - ctx := context.Background() - artifactsURL, err := url.Parse(ts.URL) - require.NoError(t, err) - loc := &Locator{ - URL: artifactsURL, - } - - t.Run("success", func(t *testing.T) { - fs, cleanup, err := Download(ctx, loc, nil) - require.NoError(t, err) - require.NotNil(t, fs) - defer func() { - require.NoError(t, cleanup()) - }() - - info, err := fs.Stat("WETH98.sol/WETH98.json") - require.NoError(t, err) - require.Greater(t, info.Size(), int64(0)) - }) - - t.Run("bad integrity", func(t *testing.T) { - _, _, err := downloadURL(ctx, loc.URL, nil, &hashIntegrityChecker{ - hash: common.Hash{'B', 'A', 'D'}, - }) - require.Error(t, err) - require.ErrorContains(t, err, "integrity check failed") - }) - - t.Run("ok integrity", func(t *testing.T) { - _, _, err := downloadURL(ctx, loc.URL, nil, &hashIntegrityChecker{ - hash: common.HexToHash("0x0f814df0c4293aaaadd468ac37e6c92f0b40fd21df848076835cb2c21d2a516f"), - }) - require.NoError(t, err) - }) -} - -func TestDownloadArtifacts_TaggedVersions(t *testing.T) { - tags := []string{ - "op-contracts/v1.6.0", - "op-contracts/v1.7.0-beta.1+l2-contracts", - } - for _, tag := range tags { - t.Run(tag, func(t *testing.T) { - t.Parallel() - - loc := MustNewLocatorFromTag(tag) - _, cleanup, err := Download(context.Background(), loc, nil) - 
t.Cleanup(func() { - require.NoError(t, cleanup()) - }) - require.NoError(t, err) - }) - } -} diff --git a/op-deployer/pkg/deployer/artifacts/integrity.go b/op-deployer/pkg/deployer/artifacts/integrity.go new file mode 100644 index 00000000000..17c6365272b --- /dev/null +++ b/op-deployer/pkg/deployer/artifacts/integrity.go @@ -0,0 +1,30 @@ +package artifacts + +import ( + "crypto/sha256" + "fmt" + + "github.com/ethereum/go-ethereum/common" +) + +type integrityChecker interface { + CheckIntegrity(data []byte) error +} + +type hashIntegrityChecker struct { + hash common.Hash +} + +func (h *hashIntegrityChecker) CheckIntegrity(data []byte) error { + hash := sha256.Sum256(data) + if hash != h.hash { + return fmt.Errorf("integrity check failed - expected: %x, got: %x", h.hash, hash) + } + return nil +} + +type noopIntegrityChecker struct{} + +func (noopIntegrityChecker) CheckIntegrity([]byte) error { + return nil +} diff --git a/op-deployer/pkg/deployer/artifacts/integrity_test.go b/op-deployer/pkg/deployer/artifacts/integrity_test.go new file mode 100644 index 00000000000..4c909153c49 --- /dev/null +++ b/op-deployer/pkg/deployer/artifacts/integrity_test.go @@ -0,0 +1,46 @@ +package artifacts + +import ( + "crypto/sha256" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHashIntegrityChecker_CheckIntegrity(t *testing.T) { + tests := []struct { + name string + data []byte + setupHash [32]byte + expectError bool + }{ + { + name: "valid hash matches data", + data: []byte("test data"), + setupHash: sha256.Sum256([]byte("test data")), + expectError: false, + }, + { + name: "invalid hash doesn't match data", + data: []byte("test data"), + setupHash: sha256.Sum256([]byte("different data")), + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + checker := &hashIntegrityChecker{ + hash: tt.setupHash, + } + + err := checker.CheckIntegrity(tt.data) + + if tt.expectError { + require.ErrorContains(t, err, "integrity 
check failed") + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/op-deployer/pkg/deployer/artifacts/locator.go b/op-deployer/pkg/deployer/artifacts/locator.go index b5d7b2b6a94..44afb60449e 100644 --- a/op-deployer/pkg/deployer/artifacts/locator.go +++ b/op-deployer/pkg/deployer/artifacts/locator.go @@ -41,17 +41,25 @@ func MustNewLocatorFromTag(tag string) *Locator { return loc } -func MustNewLocatorFromURL(u string) *Locator { +func NewLocatorFromURL(u string) (*Locator, error) { if strings.HasPrefix(u, "tag://") { - return MustNewLocatorFromTag(strings.TrimPrefix(u, "tag://")) + return NewLocatorFromTag(strings.TrimPrefix(u, "tag://")) } parsedURL, err := url.Parse(u) if err != nil { - panic(err) + return nil, fmt.Errorf("failed to parse URL: %w", err) } return &Locator{ URL: parsedURL, + }, nil +} + +func MustNewLocatorFromURL(u string) *Locator { + loc, err := NewLocatorFromURL(u) + if err != nil { + panic(err) } + return loc } func MustNewFileLocator(path string) *Locator { diff --git a/op-deployer/pkg/deployer/artifacts/progress.go b/op-deployer/pkg/deployer/artifacts/progress.go new file mode 100644 index 00000000000..d6a3eed5750 --- /dev/null +++ b/op-deployer/pkg/deployer/artifacts/progress.go @@ -0,0 +1,48 @@ +package artifacts + +import ( + "io" + "sync" + + "github.com/ethereum/go-ethereum/log" + "github.com/schollz/progressbar/v3" +) + +type DownloadProgressor func(current, total int64) + +func BarProgressor() DownloadProgressor { + var bar *progressbar.ProgressBar + var init sync.Once + return func(curr, total int64) { + init.Do(func() { + bar = progressbar.DefaultBytes(total) + }) + _ = bar.Set64(curr) + } +} + +func NoopProgressor() DownloadProgressor { + return func(curr, total int64) {} +} + +func LogProgressor(lgr log.Logger) DownloadProgressor { + return func(curr, total int64) { + lgr.Info("artifacts download progress", "current", curr, "total", total) + } +} + +type progressReader struct { + r io.Reader + progress 
DownloadProgressor + curr int64 + total int64 +} + +func (pr *progressReader) Read(p []byte) (int, error) { + n, err := pr.r.Read(p) + pr.curr += int64(n) + if pr.progress != nil { + pr.progress(pr.curr, pr.total) + } + return n, err +} diff --git a/op-deployer/pkg/deployer/bootstrap/flags.go b/op-deployer/pkg/deployer/bootstrap/flags.go index 8fb7d2ea7ae..30762160705 100644 --- a/op-deployer/pkg/deployer/bootstrap/flags.go +++ b/op-deployer/pkg/deployer/bootstrap/flags.go @@ -128,6 +128,11 @@ var ( Usage: "Protocol versions proxy.", EnvVars: deployer.PrefixEnvVar("PROTOCOL_VERSIONS_PROXY"), } + UpgradeControllerFlag = &cli.StringFlag{ + Name: "upgrade-controller", + Usage: "Upgrade controller.", + EnvVars: deployer.PrefixEnvVar("UPGRADE_CONTROLLER"), + } UseInteropFlag = &cli.BoolFlag{ Name: "use-interop", Usage: "If true, deploy Interop implementations.", @@ -149,6 +154,7 @@ var ImplementationsFlags = []cli.Flag{ DisputeGameFinalityDelaySecondsFlag, SuperchainConfigProxyFlag, ProtocolVersionsProxyFlag, + UpgradeControllerFlag, UseInteropFlag, } diff --git a/op-deployer/pkg/deployer/bootstrap/implementations.go b/op-deployer/pkg/deployer/bootstrap/implementations.go index 9d2841a0a8c..c868aeb60dc 100644 --- a/op-deployer/pkg/deployer/bootstrap/implementations.go +++ b/op-deployer/pkg/deployer/bootstrap/implementations.go @@ -8,6 +8,8 @@ import ( "math/big" "strings" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" @@ -94,6 +96,9 @@ func (c *ImplementationsConfig) Check() error { if c.ProtocolVersionsProxy == (common.Address{}) { return errors.New("protocol versions proxy must be specified") } + if c.UpgradeController == (common.Address{}) { + return errors.New("upgrade controller must be specified") + } return nil } 
@@ -106,6 +111,7 @@ func ImplementationsCLI(cliCtx *cli.Context) error { if err := cliutil.PopulateStruct(&cfg, cliCtx); err != nil { return fmt.Errorf("failed to populate config: %w", err) } + cfg.Logger = l ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) outfile := cliCtx.String(OutfileFlagName) @@ -126,19 +132,11 @@ func Implementations(ctx context.Context, cfg ImplementationsConfig) (opcm.Deplo } lgr := cfg.Logger - progressor := func(curr, total int64) { - lgr.Info("artifacts download progress", "current", curr, "total", total) - } - artifactsFS, cleanup, err := artifacts.Download(ctx, cfg.ArtifactsLocator, progressor) + artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, artifacts.BarProgressor()) if err != nil { return dio, fmt.Errorf("failed to download artifacts: %w", err) } - defer func() { - if err := cleanup(); err != nil { - lgr.Warn("failed to clean up artifacts", "err", err) - } - }() l1Client, err := ethclient.Dial(cfg.L1RPCUrl) if err != nil { @@ -181,6 +179,11 @@ func Implementations(ctx context.Context, cfg ImplementationsConfig) (opcm.Deplo return dio, fmt.Errorf("failed to create script host: %w", err) } + superProxyAdmin, err := standard.SuperchainProxyAdminAddrFor(chainID.Uint64()) + if err != nil { + return dio, fmt.Errorf("failed to get superchain proxy admin address: %w", err) + } + if dio, err = opcm.DeployImplementations( l1Host, opcm.DeployImplementationsInput{ @@ -193,6 +196,7 @@ func Implementations(ctx context.Context, cfg ImplementationsConfig) (opcm.Deplo L1ContractsRelease: cfg.L1ContractsRelease, SuperchainConfigProxy: cfg.SuperchainConfigProxy, ProtocolVersionsProxy: cfg.ProtocolVersionsProxy, + SuperchainProxyAdmin: superProxyAdmin, UpgradeController: cfg.UpgradeController, UseInterop: cfg.UseInterop, }, diff --git a/op-deployer/pkg/deployer/bootstrap/proxy.go b/op-deployer/pkg/deployer/bootstrap/proxy.go index ee8399b4bb5..88844197490 100644 --- a/op-deployer/pkg/deployer/bootstrap/proxy.go +++ 
b/op-deployer/pkg/deployer/bootstrap/proxy.go @@ -110,19 +110,10 @@ func Proxy(ctx context.Context, cfg ProxyConfig) (opcm.DeployProxyOutput, error) } lgr := cfg.Logger - progressor := func(curr, total int64) { - lgr.Info("artifacts download progress", "current", curr, "total", total) - } - - artifactsFS, cleanup, err := artifacts.Download(ctx, cfg.ArtifactsLocator, progressor) + artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, artifacts.BarProgressor()) if err != nil { return dpo, fmt.Errorf("failed to download artifacts: %w", err) } - defer func() { - if err := cleanup(); err != nil { - lgr.Warn("failed to clean up artifacts", "err", err) - } - }() l1Client, err := ethclient.Dial(cfg.L1RPCUrl) if err != nil { diff --git a/op-deployer/pkg/deployer/bootstrap/superchain.go b/op-deployer/pkg/deployer/bootstrap/superchain.go index d9c5ed3d523..6bfec6c44c7 100644 --- a/op-deployer/pkg/deployer/bootstrap/superchain.go +++ b/op-deployer/pkg/deployer/bootstrap/superchain.go @@ -139,19 +139,10 @@ func Superchain(ctx context.Context, cfg SuperchainConfig) (opcm.DeploySuperchai } lgr := cfg.Logger - progressor := func(curr, total int64) { - lgr.Info("artifacts download progress", "current", curr, "total", total) - } - - artifactsFS, cleanup, err := artifacts.Download(ctx, cfg.ArtifactsLocator, progressor) + artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, artifacts.BarProgressor()) if err != nil { return dso, fmt.Errorf("failed to download artifacts: %w", err) } - defer func() { - if err := cleanup(); err != nil { - lgr.Warn("failed to clean up artifacts", "err", err) - } - }() l1Client, err := ethclient.Dial(cfg.L1RPCUrl) if err != nil { diff --git a/op-deployer/pkg/deployer/broadcaster/calldata.go b/op-deployer/pkg/deployer/broadcaster/calldata.go index 5c408276efb..10b4b8d493b 100644 --- a/op-deployer/pkg/deployer/broadcaster/calldata.go +++ b/op-deployer/pkg/deployer/broadcaster/calldata.go @@ -13,9 +13,9 @@ import ( const 
defaultGasLimit = 30_000_000 type CalldataDump struct { - To *common.Address - Data hexutil.Bytes - Value *hexutil.Big + To *common.Address `json:"to"` + Data hexutil.Bytes `json:"data"` + Value *hexutil.Big `json:"value"` } type CalldataBroadcaster struct { diff --git a/op-deployer/pkg/deployer/broadcaster/keyed.go b/op-deployer/pkg/deployer/broadcaster/keyed.go index 1f25796a737..2031ebe296e 100644 --- a/op-deployer/pkg/deployer/broadcaster/keyed.go +++ b/op-deployer/pkg/deployer/broadcaster/keyed.go @@ -230,7 +230,7 @@ func asTxCandidate(bcast script.Broadcast, blockGasLimit uint64) txmgr.TxCandida // is clamped to the block gas limit since Geth will reject transactions that exceed it before letting them // into the mempool. func padGasLimit(data []byte, gasUsed uint64, creation bool, blockGasLimit uint64) uint64 { - intrinsicGas, err := core.IntrinsicGas(data, nil, creation, true, true, false) + intrinsicGas, err := core.IntrinsicGas(data, nil, nil, creation, true, true, false) // This method never errors - we should look into it if it does. 
if err != nil { panic(err) diff --git a/op-deployer/pkg/deployer/flags.go b/op-deployer/pkg/deployer/flags.go index 3494ad436e5..6c68affdd7e 100644 --- a/op-deployer/pkg/deployer/flags.go +++ b/op-deployer/pkg/deployer/flags.go @@ -115,6 +115,12 @@ var ApplyFlags = []cli.Flag{ DeploymentTargetFlag, } +var UpgradeFlags = []cli.Flag{ + L1RPCURLFlag, + PrivateKeyFlag, + DeploymentTargetFlag, +} + func PrefixEnvVar(name string) []string { return op_service.PrefixEnvVar(EnvVarPrefix, name) } diff --git a/op-deployer/pkg/deployer/inspect/semvers.go b/op-deployer/pkg/deployer/inspect/semvers.go index 48e16d21dbc..f9658bf184c 100644 --- a/op-deployer/pkg/deployer/inspect/semvers.go +++ b/op-deployer/pkg/deployer/inspect/semvers.go @@ -59,15 +59,10 @@ func L2SemversCLI(cliCtx *cli.Context) error { return fmt.Errorf("chain state does not have allocs") } - artifactsFS, cleanup, err := artifacts.Download(ctx, intent.L2ContractsLocator, artifacts.LogProgressor(l)) + artifactsFS, err := artifacts.Download(ctx, intent.L2ContractsLocator, artifacts.BarProgressor()) if err != nil { return fmt.Errorf("failed to download L2 artifacts: %w", err) } - defer func() { - if err := cleanup(); err != nil { - l.Warn("failed to clean up L2 artifacts", "err", err) - } - }() ps, err := L2Semvers(L2SemversConfig{ Lgr: l, diff --git a/op-deployer/pkg/deployer/integration_test/apply_test.go b/op-deployer/pkg/deployer/integration_test/apply_test.go index 06122b07266..f4489003c97 100644 --- a/op-deployer/pkg/deployer/integration_test/apply_test.go +++ b/op-deployer/pkg/deployer/integration_test/apply_test.go @@ -583,7 +583,7 @@ func TestManageDependencies(t *testing.T) { // doesn't expose the host directly. 
loc, _ := testutil.LocalArtifacts(t) - afacts, _, err := artifacts.Download(ctx, loc, artifacts.NoopDownloadProgressor) + afacts, err := artifacts.Download(ctx, loc, artifacts.NoopProgressor()) require.NoError(t, err) host, err := env.DefaultScriptHost( diff --git a/op-deployer/pkg/deployer/manage/dependencies.go b/op-deployer/pkg/deployer/manage/dependencies.go index a95a17b4f8a..a197f3f80bd 100644 --- a/op-deployer/pkg/deployer/manage/dependencies.go +++ b/op-deployer/pkg/deployer/manage/dependencies.go @@ -116,15 +116,10 @@ func Dependencies(ctx context.Context, cfg DependenciesConfig) error { lgr.Info("artifacts download progress", "current", curr, "total", total) } - artifactsFS, cleanup, err := artifacts.Download(ctx, cfg.ArtifactsLocator, progressor) + artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, progressor) if err != nil { return fmt.Errorf("failed to download artifacts: %w", err) } - defer func() { - if err := cleanup(); err != nil { - lgr.Warn("failed to clean up artifacts", "err", err) - } - }() l1RPC, err := rpc.Dial(cfg.L1RPCUrl) if err != nil { diff --git a/op-deployer/pkg/deployer/testutil/env.go b/op-deployer/pkg/deployer/testutil/env.go index 6b289c4503b..29c57ea5d72 100644 --- a/op-deployer/pkg/deployer/testutil/env.go +++ b/op-deployer/pkg/deployer/testutil/env.go @@ -8,14 +8,14 @@ import ( "runtime" "testing" - artifacts2 "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" op_service "github.com/ethereum-optimism/optimism/op-service" "github.com/stretchr/testify/require" ) -func LocalArtifacts(t *testing.T) (*artifacts2.Locator, foundry.StatDirFs) { +func LocalArtifacts(t *testing.T) (*artifacts.Locator, foundry.StatDirFs) { _, testFilename, _, ok := runtime.Caller(0) require.Truef(t, ok, "failed to get test filename") monorepoDir, err := 
op_service.FindMonorepoRoot(testFilename) @@ -23,15 +23,12 @@ func LocalArtifacts(t *testing.T) (*artifacts2.Locator, foundry.StatDirFs) { artifactsDir := path.Join(monorepoDir, "packages", "contracts-bedrock", "forge-artifacts") artifactsURL, err := url.Parse(fmt.Sprintf("file://%s", artifactsDir)) require.NoError(t, err) - loc := &artifacts2.Locator{ + loc := &artifacts.Locator{ URL: artifactsURL, } - artifactsFS, cleanupArtifacts, err := artifacts2.Download(context.Background(), loc, artifacts2.NoopDownloadProgressor) + artifactsFS, err := artifacts.Download(context.Background(), loc, artifacts.NoopProgressor()) require.NoError(t, err) - t.Cleanup(func() { - _ = cleanupArtifacts() - }) return loc, artifactsFS } diff --git a/op-deployer/pkg/deployer/upgrade/flags.go b/op-deployer/pkg/deployer/upgrade/flags.go new file mode 100644 index 00000000000..51f15aa88b5 --- /dev/null +++ b/op-deployer/pkg/deployer/upgrade/flags.go @@ -0,0 +1,34 @@ +package upgrade + +import ( + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" + v200 "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/upgrade/v2_0_0" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/urfave/cli/v2" +) + +var ( + ConfigFlag = &cli.StringFlag{ + Name: "config", + Usage: "path to the config file", + } + OverrideArtifactsURLFlag = &cli.StringFlag{ + Name: "override-artifacts-url", + Usage: "override the artifacts URL", + } +) + +var Commands = cli.Commands{ + &cli.Command{ + Name: "v2.0.0", + Usage: "upgrades a chain to version v2.0.0", + Flags: append([]cli.Flag{ + deployer.L1RPCURLFlag, + deployer.DeploymentTargetFlag, + deployer.PrivateKeyFlag, + ConfigFlag, + OverrideArtifactsURLFlag, + }, oplog.CLIFlags(deployer.EnvVarPrefix)...), + Action: UpgradeCLI(v200.DefaultUpgrader), + }, +} diff --git a/op-deployer/pkg/deployer/upgrade/upgrader.go b/op-deployer/pkg/deployer/upgrade/upgrader.go new file mode 100644 index 00000000000..1b7457c44e1 --- /dev/null +++ 
b/op-deployer/pkg/deployer/upgrade/upgrader.go @@ -0,0 +1,152 @@ +package upgrade + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" + opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + "github.com/urfave/cli/v2" +) + +type Upgrader interface { + Upgrade(host *script.Host, input json.RawMessage) error + SupportsVersion(version string) bool + ArtifactsURL() string +} + +func UpgradeCLI(upgrader Upgrader) func(*cli.Context) error { + return func(cliCtx *cli.Context) error { + logCfg := oplog.ReadCLIConfig(cliCtx) + lgr := oplog.NewLogger(oplog.AppOut(cliCtx), logCfg) + oplog.SetGlobalLogHandler(lgr.Handler()) + + ctx, cancel := context.WithCancel(cliCtx.Context) + defer cancel() + + l1RPC := cliCtx.String(deployer.L1RPCURLFlag.Name) + if l1RPC == "" { + return fmt.Errorf("missing required flag: %s", deployer.L1RPCURLFlag.Name) + } + deploymentTarget, err := deployer.NewDeploymentTarget(cliCtx.String(deployer.DeploymentTargetFlag.Name)) + if err != nil { + return fmt.Errorf("failed to parse deployment target: %w", err) + } + + artifactsURL := upgrader.ArtifactsURL() + overrideArtifactsURL := cliCtx.String(OverrideArtifactsURLFlag.Name) + if overrideArtifactsURL != "" { + artifactsURL = overrideArtifactsURL + } + artifactsLocator, err := artifacts.NewLocatorFromURL(artifactsURL) + if err != nil { + return fmt.Errorf("failed to parse artifacts URL: %w", err) + } + + 
rpcClient, err := rpc.Dial(l1RPC) + if err != nil { + return fmt.Errorf("failed to dial RPC %s: %w", l1RPC, err) + } + ethClient := ethclient.NewClient(rpcClient) + + chainID, err := ethClient.ChainID(ctx) + if err != nil { + return fmt.Errorf("failed to get chain ID: %w", err) + } + + var bcaster broadcaster.Broadcaster + depAddr := common.Address{'D'} + switch deploymentTarget { + case deployer.DeploymentTargetLive: + privateKeyHex := cliCtx.String(deployer.PrivateKeyFlag.Name) + if privateKeyHex == "" { + return fmt.Errorf("%s flag is required for live deployment", deployer.PrivateKeyFlag.Name) + } + + pk, err := crypto.HexToECDSA(privateKeyHex) + if err != nil { + return fmt.Errorf("failed to parse private key: %w", err) + } + + depAddr = crypto.PubkeyToAddress(pk.PublicKey) + + bcaster, err = broadcaster.NewKeyedBroadcaster(broadcaster.KeyedBroadcasterOpts{ + Logger: lgr, + ChainID: chainID, + Client: ethClient, + Signer: opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(pk, chainID)), + From: depAddr, + }) + if err != nil { + return fmt.Errorf("failed to create broadcaster: %w", err) + } + case deployer.DeploymentTargetCalldata: + bcaster = new(broadcaster.CalldataBroadcaster) + case deployer.DeploymentTargetNoop: + bcaster = broadcaster.NoopBroadcaster() + case deployer.DeploymentTargetGenesis: + return fmt.Errorf("cannot upgrade into a genesis deployment") + default: + return fmt.Errorf("unknown deployment target: %s", deploymentTarget) + } + + artifactsFS, err := artifacts.Download(ctx, artifactsLocator, artifacts.BarProgressor()) + if err != nil { + return fmt.Errorf("failed to download L1 artifacts: %w", err) + } + + host, err := env.DefaultForkedScriptHost( + ctx, + bcaster, + lgr, + depAddr, + artifactsFS, + rpcClient, + ) + if err != nil { + return fmt.Errorf("failed to create script host: %w", err) + } + + configFilePath := cliCtx.String(ConfigFlag.Name) + if configFilePath == "" { + return fmt.Errorf("missing required flag: %s", ConfigFlag.Name) 
+ } + cfgData, err := os.ReadFile(configFilePath) + if err != nil { + return fmt.Errorf("failed to read config file: %w", err) + } + if err := upgrader.Upgrade(host, cfgData); err != nil { + return fmt.Errorf("failed to upgrade: %w", err) + } + + if deploymentTarget == deployer.DeploymentTargetCalldata { + dump, err := bcaster.(*broadcaster.CalldataBroadcaster).Dump() + if err != nil { + return fmt.Errorf("failed to dump calldata: %w", err) + } + + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + if err := enc.Encode(dump); err != nil { + return fmt.Errorf("failed to encode calldata: %w", err) + } + } else if deploymentTarget == deployer.DeploymentTargetLive { + if _, err := bcaster.Broadcast(ctx); err != nil { + return fmt.Errorf("failed to broadcast: %w", err) + } + } + + return nil + } +} diff --git a/op-deployer/pkg/deployer/upgrade/v2_0_0/testdata/config.json b/op-deployer/pkg/deployer/upgrade/v2_0_0/testdata/config.json new file mode 100644 index 00000000000..2b0111674da --- /dev/null +++ b/op-deployer/pkg/deployer/upgrade/v2_0_0/testdata/config.json @@ -0,0 +1,10 @@ +{ + "prank": "0x1Eb2fFc903729a0F03966B917003800b145F56E2", + "opcm": "0x0c9efe47eac86ee9868dda15c9c584025a7de1d0", + "chainConfigs": [ + { + "systemConfigProxy": "0x034edD2A225f7f429A63E0f1D2084B9E0A93b538", + "proxyAdmin": "0x189aBAAaa82DfC015A588A7dbaD6F13b1D3485Bc" + } + ] +} \ No newline at end of file diff --git a/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade.go b/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade.go new file mode 100644 index 00000000000..97fe565545c --- /dev/null +++ b/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade.go @@ -0,0 +1,60 @@ +package v2_0_0 + +import ( + "encoding/json" + "fmt" + + "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" + "github.com/ethereum/go-ethereum/common" + "github.com/lmittmann/w3" +) + +type UpgradeOPChainInput struct { + Prank common.Address 
`json:"prank"` + Opcm common.Address `json:"opcm"` + EncodedChainConfigs []OPChainConfig `evm:"-" json:"chainConfigs"` +} + +type OPChainConfig struct { + SystemConfigProxy common.Address `json:"systemConfigProxy"` + ProxyAdmin common.Address `json:"proxyAdmin"` +} + +var opChainConfigEncoder = w3.MustNewFunc("dummy((address systemConfigProxy,address proxyAdmin)[])", "") + +func (u *UpgradeOPChainInput) OpChainConfigs() ([]byte, error) { + data, err := opChainConfigEncoder.EncodeArgs(u.EncodedChainConfigs) + if err != nil { + return nil, fmt.Errorf("failed to encode chain configs: %w", err) + } + return data[4:], nil +} + +type UpgradeOPChain struct { + Run func(input common.Address) +} + +func Upgrade(host *script.Host, input UpgradeOPChainInput) error { + return opcm.RunScriptVoid[UpgradeOPChainInput](host, input, "UpgradeOPChain.s.sol", "UpgradeOPChain") +} + +type Upgrader struct{} + +func (u *Upgrader) Upgrade(host *script.Host, input json.RawMessage) error { + var upgradeInput UpgradeOPChainInput + if err := json.Unmarshal(input, &upgradeInput); err != nil { + return fmt.Errorf("failed to unmarshal input: %w", err) + } + return Upgrade(host, upgradeInput) +} + +func (u *Upgrader) SupportsVersion(version string) bool { + return version == "2.0.0" +} + +func (u *Upgrader) ArtifactsURL() string { + return "" +} + +var DefaultUpgrader = new(Upgrader) diff --git a/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade_test.go b/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade_test.go new file mode 100644 index 00000000000..1074fba9c9d --- /dev/null +++ b/op-deployer/pkg/deployer/upgrade/v2_0_0/upgrade_test.go @@ -0,0 +1,117 @@ +package v2_0_0 + +import ( + "context" + "encoding/hex" + "log/slog" + "os" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/testutil" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" + 
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/retryproxy" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + "github.com/stretchr/testify/require" +) + +func TestUpgradeOPChainInput_OpChainConfigs(t *testing.T) { + input := &UpgradeOPChainInput{ + Prank: common.Address{0xaa}, + Opcm: common.Address{0xbb}, + EncodedChainConfigs: []OPChainConfig{ + { + SystemConfigProxy: common.Address{0x01}, + ProxyAdmin: common.Address{0x02}, + }, + { + SystemConfigProxy: common.Address{0x04}, + ProxyAdmin: common.Address{0x05}, + }, + }, + } + data, err := input.OpChainConfigs() + require.NoError(t, err) + require.Equal( + t, + "0000000000000000000000000000000000000000000000000000000000000020"+ + "0000000000000000000000000000000000000000000000000000000000000002"+ + "0000000000000000000000000100000000000000000000000000000000000000"+ + "0000000000000000000000000200000000000000000000000000000000000000"+ + "0000000000000000000000000400000000000000000000000000000000000000"+ + "0000000000000000000000000500000000000000000000000000000000000000", + hex.EncodeToString(data), + ) +} + +func TestUpgrader_Upgrade(t *testing.T) { + _, afactsFS := testutil.LocalArtifacts(t) + + forkRPCURL := os.Getenv("SEPOLIA_RPC_URL") + require.NotEmpty(t, forkRPCURL, "must specify RPC url via SEPOLIA_RPC_URL env var") + + lgr := testlog.Logger(t, slog.LevelDebug) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + retryProxy := retryproxy.New(lgr, forkRPCURL) + require.NoError(t, retryProxy.Start()) + t.Cleanup(func() { + require.NoError(t, retryProxy.Stop()) + }) + + rpcClient, err := rpc.Dial(retryProxy.Endpoint()) + require.NoError(t, err) + + bcast := new(broadcaster.CalldataBroadcaster) + host, err := env.DefaultForkedScriptHost( + ctx, + bcast, + lgr, + common.Address{'D'}, + afactsFS, + rpcClient, + ) + 
require.NoError(t, err) + + configFile, err := os.ReadFile("testdata/config.json") + require.NoError(t, err) + + upgrader := DefaultUpgrader + require.NoError(t, upgrader.Upgrade(host, configFile)) + + dump, err := bcast.Dump() + require.NoError(t, err) + + addr := common.HexToAddress("0x1Eb2fFc903729a0F03966B917003800b145F56E2") + require.True(t, dump[0].Value.ToInt().Cmp(common.Big0) == 0) + // Have to do this to normalize zero values which can either set nat to nil + // or to a zero value. They mean the same thing, but aren't equal according to + // EqualValues. + dump[0].Value = (*hexutil.Big)(common.Big0) + + require.EqualValues(t, []broadcaster.CalldataDump{ + { + To: &addr, + Data: []byte{ + 0x5d, 0x4e, 0xfc, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0x4e, 0xdd, 0x2a, 0x22, 0x5f, 0x7f, 0x42, 0x9a, 0x63, 0xe0, + 0xf1, 0xd2, 0x08, 0x4b, 0x9e, 0x0a, 0x93, 0xb5, 0x38, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x9a, 0xba, 0xaa, 0xa8, + 0x2d, 0xfc, 0x01, 0x5a, 0x58, 0x8a, 0x7d, 0xba, 0xd6, 0xf1, 0x3b, 0x1d, 0x34, + 0x85, 0xbc, + }, + Value: (*hexutil.Big)(common.Big0), + }, + }, dump) +} diff --git a/op-e2e/Makefile b/op-e2e/Makefile index f849aa7cda8..637f39c3234 100644 --- a/op-e2e/Makefile +++ b/op-e2e/Makefile @@ -40,8 +40,8 @@ test-fault-proofs: pre-test cannon-prestates: make -C .. cannon-prestate - make -C .. cannon-prestate-mt -.PHONY: cannon-prestate + make -C .. 
cannon-prestate-mt64 +.PHONY: cannon-prestates pre-test: pre-test-cannon diff --git a/op-e2e/actions/batcher/l2_batcher_test.go b/op-e2e/actions/batcher/l2_batcher_test.go index 3fec73db4f7..9d080d2bafd 100644 --- a/op-e2e/actions/batcher/l2_batcher_test.go +++ b/op-e2e/actions/batcher/l2_batcher_test.go @@ -472,7 +472,7 @@ func BigL2Txs(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { data := make([]byte, 120_000) // very large L2 txs, as large as the tx-pool will accept _, err := rng.Read(data[:]) // fill with random bytes, to make compression ineffective require.NoError(t, err) - gas, err := core.IntrinsicGas(data, nil, false, true, true, false) + gas, err := core.IntrinsicGas(data, nil, nil, false, true, true, false) require.NoError(t, err) if gas > engine.EngineApi.RemainingBlockGas() { break diff --git a/op-e2e/actions/helpers/l1_miner.go b/op-e2e/actions/helpers/l1_miner.go index f752e482ab8..316ffc6e57d 100644 --- a/op-e2e/actions/helpers/l1_miner.go +++ b/op-e2e/actions/helpers/l1_miner.go @@ -117,8 +117,8 @@ func (s *L1Miner) ActL1StartBlock(timeDelta uint64) Action { if vmConfig := s.l1Chain.GetVMConfig(); vmConfig != nil && vmConfig.PrecompileOverrides != nil { precompileOverrides = vmConfig.PrecompileOverrides } - vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, s.l1Chain.Config(), vm.Config{PrecompileOverrides: precompileOverrides}) - core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, statedb) + vmenv := vm.NewEVM(context, statedb, s.l1Chain.Config(), vm.Config{PrecompileOverrides: precompileOverrides}) + core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv) } s.l1Building = true @@ -177,8 +177,10 @@ func (s *L1Miner) IncludeTx(t Testing, tx *types.Transaction) { return } s.l1BuildingState.SetTxContext(tx.Hash(), len(s.L1Transactions)) - receipt, err := core.ApplyTransaction(s.l1Cfg.Config, s.l1Chain, &s.l1BuildingHeader.Coinbase, - s.L1GasPool, s.l1BuildingState, s.l1BuildingHeader, tx.WithoutBlobTxSidecar(), 
&s.l1BuildingHeader.GasUsed, *s.l1Chain.GetVMConfig()) + blockCtx := core.NewEVMBlockContext(s.l1BuildingHeader, s.l1Chain, nil, s.l1Cfg.Config, s.l1BuildingState) + evm := vm.NewEVM(blockCtx, s.l1BuildingState, s.l1Cfg.Config, *s.l1Chain.GetVMConfig()) + receipt, err := core.ApplyTransaction( + evm, s.L1GasPool, s.l1BuildingState, s.l1BuildingHeader, tx.WithoutBlobTxSidecar(), &s.l1BuildingHeader.GasUsed) if err != nil { s.l1TxFailed = append(s.l1TxFailed, tx) t.Fatalf("failed to apply transaction to L1 block (tx %d): %v", len(s.L1Transactions), err) @@ -219,7 +221,8 @@ func (s *L1Miner) ActL1EndBlock(t Testing) *types.Block { } block := types.NewBlock(s.l1BuildingHeader, &types.Body{Transactions: s.L1Transactions, Withdrawals: withdrawals}, s.l1Receipts, trie.NewStackTrie(nil), types.DefaultBlockConfig) - if s.l1Cfg.Config.IsCancun(s.l1BuildingHeader.Number, s.l1BuildingHeader.Time) { + isCancun := s.l1Cfg.Config.IsCancun(s.l1BuildingHeader.Number, s.l1BuildingHeader.Time) + if isCancun { parent := s.l1Chain.GetHeaderByHash(s.l1BuildingHeader.ParentHash) var ( parentExcessBlobGas uint64 @@ -234,7 +237,7 @@ func (s *L1Miner) ActL1EndBlock(t Testing) *types.Block { } // Write state changes to db - root, err := s.l1BuildingState.Commit(s.l1BuildingHeader.Number.Uint64(), s.l1Cfg.Config.IsEIP158(s.l1BuildingHeader.Number)) + root, err := s.l1BuildingState.Commit(s.l1BuildingHeader.Number.Uint64(), s.l1Cfg.Config.IsEIP158(s.l1BuildingHeader.Number), isCancun) if err != nil { t.Fatalf("l1 state write error: %v", err) } diff --git a/op-e2e/actions/helpers/l2_batcher.go b/op-e2e/actions/helpers/l2_batcher.go index 88fde9bd9a9..f270bddb717 100644 --- a/op-e2e/actions/helpers/l2_batcher.go +++ b/op-e2e/actions/helpers/l2_batcher.go @@ -343,7 +343,7 @@ func (s *L2Batcher) ActL2BatchSubmitRaw(t Testing, payload []byte, txOpts ...fun opt(rawTx) } - gas, err := core.IntrinsicGas(rawTx.Data, nil, false, true, true, false) + gas, err := core.IntrinsicGas(rawTx.Data, nil, nil, 
false, true, true, false) require.NoError(t, err, "need to compute intrinsic gas") rawTx.Gas = gas txData = rawTx diff --git a/op-e2e/actions/interop/interop_test.go b/op-e2e/actions/interop/interop_test.go index 9957d0da502..720b1185c97 100644 --- a/op-e2e/actions/interop/interop_test.go +++ b/op-e2e/actions/interop/interop_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -26,6 +27,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/predeploys" "github.com/ethereum-optimism/optimism/op-service/testlog" + gethTypes "github.com/ethereum/go-ethereum/core/types" ) func TestFullInterop(gt *testing.T) { @@ -264,6 +266,8 @@ func TestInteropLocalSafeInvalidation(gt *testing.T) { actors.ChainB.Sequencer.ActL2PipelineFull(t) originalBlock := actors.ChainB.Sequencer.SyncStatus().UnsafeL2 require.Equal(t, uint64(1), originalBlock.Number) + originalOutput, err := actors.ChainB.Sequencer.RollupClient().OutputAtBlock(t.Ctx(), originalBlock.Number) + require.NoError(t, err) // build another empty L2 block, that will get reorged out actors.ChainB.Sequencer.ActL2StartBlock(t) @@ -326,7 +330,7 @@ func TestInteropLocalSafeInvalidation(gt *testing.T) { txs := replacementBlock.Transactions() out, err := managed.DecodeInvalidatedBlockTx(txs[len(txs)-1]) require.NoError(t, err) - require.Equal(t, originalBlock.Hash, out.BlockHash) + require.Equal(t, originalOutput.OutputRoot, eth.OutputRoot(out)) // Now check if we can continue to build L2 blocks on top of the new chain. 
// Build a new L2 block @@ -543,10 +547,268 @@ func TestInteropFaultProofs(gt *testing.T) { expectValid: true, }, { - name: "Consolidate-ReplaceInvalidBlock", - // Will need to generate an invalid block before this can be enabled - skipProgram: true, - skipChallenger: true, + name: "AlreadyAtClaimedTimestamp", + agreedClaim: end.Marshal(), + disputedClaim: end.Marshal(), + disputedTraceIndex: 5000, + expectValid: true, + }, + + { + name: "FirstChainReachesL1Head", + agreedClaim: start.Marshal(), + disputedClaim: interop.InvalidTransition, + disputedTraceIndex: 0, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: true, + }, + { + name: "SecondChainReachesL1Head", + agreedClaim: step1Expected, + disputedClaim: interop.InvalidTransition, + disputedTraceIndex: 1, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: true, + skipChallenger: true, // test's agreedClaim is incorrect - first chain is also invalid + }, + { + name: "SuperRootInvalidIfUnsupportedByL1Data", + agreedClaim: start.Marshal(), + disputedClaim: step1Expected, + disputedTraceIndex: 0, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: false, + }, + { + name: "FromInvalidTransitionHash", + agreedClaim: interop.InvalidTransition, + disputedClaim: interop.InvalidTransition, + disputedTraceIndex: 2, + // The derivation reaches the L1 head before the next block can be created + l1Head: actors.L1Miner.L1Chain().Genesis().Hash(), + expectValid: true, + }, + } + + for _, test := range tests { + test := test + gt.Run(fmt.Sprintf("%s-fpp", test.name), func(gt *testing.T) { + t := helpers.NewDefaultTesting(gt) + if test.skipProgram { + t.Skip("Not yet implemented") + return + } + logger := testlog.Logger(t, slog.LevelInfo) + checkResult 
:= fpHelpers.ExpectNoError() + if !test.expectValid { + checkResult = fpHelpers.ExpectError(claim.ErrClaimNotValid) + } + l1Head := test.l1Head + if l1Head == (common.Hash{}) { + l1Head = actors.L1Miner.L1Chain().CurrentBlock().Hash() + } + fpHelpers.RunFaultProofProgram( + t, + logger, + actors.L1Miner, + checkResult, + WithInteropEnabled(actors, test.agreedClaim, crypto.Keccak256Hash(test.disputedClaim), endTimestamp), + fpHelpers.WithL1Head(l1Head), + ) + }) + + gt.Run(fmt.Sprintf("%s-challenger", test.name), func(gt *testing.T) { + t := helpers.NewDefaultTesting(gt) + if test.skipChallenger { + t.Skip("Not yet implemented") + return + } + logger := testlog.Logger(t, slog.LevelInfo) + prestateProvider := super.NewSuperRootPrestateProvider(&actors.Supervisor.QueryFrontend, startTimestamp) + var l1Head eth.BlockID + if test.l1Head == (common.Hash{}) { + l1Head = eth.ToBlockID(eth.HeaderBlockInfo(actors.L1Miner.L1Chain().CurrentBlock())) + } else { + l1Head = eth.ToBlockID(actors.L1Miner.L1Chain().GetBlockByHash(test.l1Head)) + } + gameDepth := challengerTypes.Depth(30) + rollupCfgs, err := super.NewRollupConfigsFromParsed(actors.ChainA.RollupCfg, actors.ChainB.RollupCfg) + require.NoError(t, err) + provider := super.NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, &actors.Supervisor.QueryFrontend, l1Head, gameDepth, startTimestamp, endTimestamp) + var agreedPrestate []byte + if test.disputedTraceIndex > 0 { + agreedPrestate, err = provider.GetPreimageBytes(ctx, challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex-1))) + require.NoError(t, err) + } else { + superRoot, err := provider.AbsolutePreState(ctx) + require.NoError(t, err) + agreedPrestate = superRoot.Marshal() + } + require.Equal(t, test.agreedClaim, agreedPrestate) + + disputedClaim, err := provider.GetPreimageBytes(ctx, challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex))) + require.NoError(t, err) + if test.expectValid { + require.Equal(t, 
test.disputedClaim, disputedClaim, "Claim is correct so should match challenger's opinion") + } else { + require.NotEqual(t, test.disputedClaim, disputedClaim, "Claim is incorrect so should not match challenger's opinion") + } + }) + } +} + +func TestInteropFaultProofsInvalidBlock(gt *testing.T) { + t := helpers.NewDefaultTesting(gt) + + is := SetupInterop(t) + actors := is.CreateActors() + aliceA := setupUser(t, is, actors.ChainA, 0) + aliceB := setupUser(t, is, actors.ChainB, 0) + initializeChainState(t, actors) + emitTx := initializeEmitterContractTest(t, aliceA, actors) + + // Create a message with a conflicting payload + fakeMessage := []byte("this message was never emitted") + auth := newL2TxOpts(t, aliceB.secret, actors.ChainB) + id := idForTx(t, actors, emitTx) + contract, err := inbox.NewInbox(predeploys.CrossL2InboxAddr, actors.ChainB.SequencerEngine.EthClient()) + require.NoError(t, err) + execTx, err := contract.ValidateMessage(auth, id, crypto.Keccak256Hash(fakeMessage)) + require.NoError(t, err) + includeTxOnChainAndSyncWithoutCrossSafety(t, actors, actors.ChainB, execTx, aliceB.address) + + // Confirm transaction inclusion + rec, err := actors.ChainB.SequencerEngine.EthClient().TransactionReceipt(t.Ctx(), execTx.Hash()) + require.NoError(t, err) + require.NotNil(t, rec) + + // safe head is still behind until we verify cross-safe + assertHeads(t, actors.ChainA, 3, 3, 2, 2) + assertHeads(t, actors.ChainB, 3, 3, 2, 2) + endTimestamp := actors.ChainB.Sequencer.L2Unsafe().Time + + chainAClient := actors.ChainA.Sequencer.RollupClient() + chainBClient := actors.ChainB.Sequencer.RollupClient() + + ctx := context.Background() + startTimestamp := endTimestamp - 1 + source, err := NewSuperRootSource(ctx, chainAClient, chainBClient) + require.NoError(t, err) + start, err := source.CreateSuperRoot(ctx, startTimestamp) + require.NoError(t, err) + end, err := source.CreateSuperRoot(ctx, endTimestamp) + require.NoError(t, err) + + endBlockNumA, err := 
actors.ChainA.RollupCfg.TargetBlockNumber(endTimestamp) + require.NoError(t, err) + chain1End, err := chainAClient.OutputAtBlock(ctx, endBlockNumA) + require.NoError(t, err) + + endBlockNumB, err := actors.ChainB.RollupCfg.TargetBlockNumber(endTimestamp) + require.NoError(t, err) + chain2End, err := chainBClient.OutputAtBlock(ctx, endBlockNumB) + require.NoError(t, err) + + step1Expected := (&types.TransitionState{ + SuperRoot: start.Marshal(), + PendingProgress: []types.OptimisticBlock{ + {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, + }, + Step: 1, + }).Marshal() + + step2Expected := (&types.TransitionState{ + SuperRoot: start.Marshal(), + PendingProgress: []types.OptimisticBlock{ + {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, + {BlockHash: chain2End.BlockRef.Hash, OutputRoot: chain2End.OutputRoot}, + }, + Step: 2, + }).Marshal() + + paddingStep := func(step uint64) []byte { + return (&types.TransitionState{ + SuperRoot: start.Marshal(), + PendingProgress: []types.OptimisticBlock{ + {BlockHash: chain1End.BlockRef.Hash, OutputRoot: chain1End.OutputRoot}, + {BlockHash: chain2End.BlockRef.Hash, OutputRoot: chain2End.OutputRoot}, + }, + Step: step, + }).Marshal() + } + + // Induce block replacement + verifyCrossSafe(t, actors) + // assert that the invalid message tx was reorged out + _, err = actors.ChainB.SequencerEngine.EthClient().TransactionReceipt(t.Ctx(), execTx.Hash()) + require.ErrorIs(gt, err, ethereum.NotFound) + assertHeads(t, actors.ChainA, 3, 3, 3, 3) + assertHeads(t, actors.ChainB, 3, 3, 3, 3) + + crossSafeSuperRootEnd, err := source.CreateSuperRoot(ctx, endTimestamp) + require.NoError(t, err) + + tests := []*transitionTest{ + { + name: "FirstChainOptimisticBlock", + agreedClaim: start.Marshal(), + disputedClaim: step1Expected, + disputedTraceIndex: 0, + expectValid: true, + skipChallenger: true, + }, + { + name: "SecondChainOptimisticBlock", + agreedClaim: step1Expected, + disputedClaim: 
step2Expected, + disputedTraceIndex: 1, + expectValid: true, + skipChallenger: true, + }, + { + name: "FirstPaddingStep", + agreedClaim: step2Expected, + disputedClaim: paddingStep(3), + disputedTraceIndex: 2, + expectValid: true, + skipChallenger: true, + }, + { + name: "SecondPaddingStep", + agreedClaim: paddingStep(3), + disputedClaim: paddingStep(4), + disputedTraceIndex: 3, + expectValid: true, + skipChallenger: true, + }, + { + name: "LastPaddingStep", + agreedClaim: paddingStep(1022), + disputedClaim: paddingStep(1023), + disputedTraceIndex: 1022, + expectValid: true, + skipChallenger: true, + }, + { + name: "Consolidate-ExpectInvalidPendingBlock", + agreedClaim: paddingStep(1023), + disputedClaim: end.Marshal(), + disputedTraceIndex: 1023, + expectValid: false, + skipProgram: true, + skipChallenger: true, + }, + { + name: "Consolidate-ReplaceInvalidBlock", + agreedClaim: paddingStep(1023), + disputedClaim: crossSafeSuperRootEnd.Marshal(), + disputedTraceIndex: 1023, + expectValid: true, + skipProgram: true, + skipChallenger: true, }, { name: "Consolidate-ReplaceBlockInvalidatedByFirstInvalidatedBlock", @@ -558,8 +820,8 @@ func TestInteropFaultProofs(gt *testing.T) { }, { name: "AlreadyAtClaimedTimestamp", - agreedClaim: end.Marshal(), - disputedClaim: end.Marshal(), + agreedClaim: crossSafeSuperRootEnd.Marshal(), + disputedClaim: crossSafeSuperRootEnd.Marshal(), disputedTraceIndex: 5000, expectValid: true, }, @@ -648,7 +910,9 @@ func TestInteropFaultProofs(gt *testing.T) { l1Head = eth.ToBlockID(actors.L1Miner.L1Chain().GetBlockByHash(test.l1Head)) } gameDepth := challengerTypes.Depth(30) - provider := super.NewSuperTraceProvider(logger, prestateProvider, &actors.Supervisor.QueryFrontend, l1Head, gameDepth, startTimestamp, endTimestamp) + rollupCfgs, err := super.NewRollupConfigsFromParsed(actors.ChainA.RollupCfg, actors.ChainB.RollupCfg) + require.NoError(t, err) + provider := super.NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, 
&actors.Supervisor.QueryFrontend, l1Head, gameDepth, startTimestamp, endTimestamp) var agreedPrestate []byte if test.disputedTraceIndex > 0 { agreedPrestate, err = provider.GetPreimageBytes(ctx, challengerTypes.NewPosition(gameDepth, big.NewInt(test.disputedTraceIndex-1))) @@ -671,6 +935,60 @@ func TestInteropFaultProofs(gt *testing.T) { } } +func includeTxOnChainAndSyncWithoutCrossSafety(t helpers.Testing, actors *InteropActors, chain *Chain, tx *gethTypes.Transaction, sender common.Address) { + // Advance both chains + chain.Sequencer.ActL2StartBlock(t) + if tx != nil { + err := chain.SequencerEngine.EngineApi.IncludeTx(tx, sender) + require.NoError(t, err) + } + chain.Sequencer.ActL2EndBlock(t) + + cross := actors.ChainA + if chain == actors.ChainA { + cross = actors.ChainB + } + cross.Sequencer.ActL2StartBlock(t) + cross.Sequencer.ActL2EndBlock(t) + + // Sync the chain and the supervisor + chain.Sequencer.SyncSupervisor(t) + actors.Supervisor.ProcessFull(t) + + // Add to L1 + actors.ChainA.Batcher.ActSubmitAll(t) + actors.ChainB.Batcher.ActSubmitAll(t) + actors.L1Miner.ActL1StartBlock(12)(t) + actors.L1Miner.ActL1IncludeTx(actors.ChainA.BatcherAddr)(t) + actors.L1Miner.ActL1IncludeTx(actors.ChainB.BatcherAddr)(t) + actors.L1Miner.ActL1EndBlock(t) + + // Complete L1 data processing + actors.ChainA.Sequencer.ActL2EventsUntil(t, event.Is[derive.ExhaustedL1Event], 100, false) + actors.ChainB.Sequencer.ActL2EventsUntil(t, event.Is[derive.ExhaustedL1Event], 100, false) + actors.Supervisor.SignalLatestL1(t) + actors.ChainA.Sequencer.SyncSupervisor(t) // supervisor to react to exhaust-L1 + actors.ChainB.Sequencer.SyncSupervisor(t) // supervisor to react to exhaust-L1 + actors.ChainA.Sequencer.ActL2PipelineFull(t) // node to complete syncing to L1 head. + actors.ChainB.Sequencer.ActL2PipelineFull(t) // node to complete syncing to L1 head. 
+ + // Ingest the new local-safe event + actors.ChainA.Sequencer.SyncSupervisor(t) + actors.ChainB.Sequencer.SyncSupervisor(t) +} + +func verifyCrossSafe(t helpers.Testing, actors *InteropActors) { + actors.Supervisor.ProcessFull(t) + actors.ChainA.Sequencer.ActL2PipelineFull(t) + actors.ChainB.Sequencer.ActL2PipelineFull(t) + // another round-trip, for post-processing like cross-safe / cross-unsafe to propagate to the op-node + actors.ChainA.Sequencer.SyncSupervisor(t) + actors.ChainB.Sequencer.SyncSupervisor(t) + actors.Supervisor.ProcessFull(t) + actors.ChainA.Sequencer.ActL2PipelineFull(t) + actors.ChainB.Sequencer.ActL2PipelineFull(t) +} + func WithInteropEnabled(actors *InteropActors, agreedPrestate []byte, disputedClaim common.Hash, claimTimestamp uint64) fpHelpers.FixtureInputParam { return func(f *fpHelpers.FixtureInputs) { f.InteropEnabled = true diff --git a/op-e2e/actions/proofs/block_data_hint_test.go b/op-e2e/actions/proofs/block_data_hint_test.go new file mode 100644 index 00000000000..a0efd3956b6 --- /dev/null +++ b/op-e2e/actions/proofs/block_data_hint_test.go @@ -0,0 +1,158 @@ +package proofs + +import ( + "context" + "testing" + + altda "github.com/ethereum-optimism/optimism/op-alt-da" + actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" + "github.com/ethereum-optimism/optimism/op-e2e/actions/proofs/helpers" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-node/node/safedb" + "github.com/ethereum-optimism/optimism/op-node/rollup/event" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-program/client/l2" + "github.com/ethereum-optimism/optimism/op-program/host/kvstore" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-service/testlog" + 
"github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func Test_OPProgramAction_BlockDataHint(gt *testing.T) { + testCfg := &helpers.TestCfg[any]{ + Hardfork: helpers.LatestFork, + } + t := actionsHelpers.NewDefaultTesting(gt) + env := helpers.NewL2FaultProofEnv(t, testCfg, helpers.NewTestParams(), helpers.NewBatcherCfg()) + + // Build a block on L2 with 1 tx. + env.Alice.L2.ActResetTxOpts(t) + env.Alice.L2.ActSetTxToAddr(&env.Dp.Addresses.Bob) + env.Alice.L2.ActMakeTx(t) + + env.Sequencer.ActL2StartBlock(t) + env.Engine.ActL2IncludeTx(env.Alice.Address())(t) + env.Sequencer.ActL2EndBlock(t) + env.Alice.L2.ActCheckReceiptStatusOfLastTx(true)(t) + + // Instruct the batcher to submit the block to L1, and include the transaction. + env.Batcher.ActSubmitAll(t) + env.Miner.ActL1StartBlock(12)(t) + env.Miner.ActL1IncludeTxByHash(env.Batcher.LastSubmitted.Hash())(t) + env.Miner.ActL1EndBlock(t) + + // Finalize the block with the batch on L1. + env.Miner.ActL1SafeNext(t) + env.Miner.ActL1FinalizeNext(t) + + // Instruct the sequencer to derive the L2 chain from the data on L1 that the batcher just posted. + env.Sequencer.ActL1HeadSignal(t) + env.Sequencer.ActL2PipelineFull(t) + + l1Head := env.Miner.L1Chain().CurrentBlock() + l2SafeHead := env.Engine.L2Chain().CurrentSafeBlock() + + // Ensure there is only 1 block on L1. + require.Equal(t, uint64(1), l1Head.Number.Uint64()) + // Ensure the block is marked as safe before we attempt to fault prove it. 
+ require.Equal(t, uint64(1), l2SafeHead.Number.Uint64()) + + // Now create a verifier that syncs up to the safe head parent + // This simulates a reorg view for the program reexecution + verifier, verifierEngine := createVerifier(t, env) + verifier.ActL2EventsUntil(t, func(ev event.Event) bool { + until := l2SafeHead.Number.Uint64() - 1 + ref, err := verifier.Eng.BlockRefByNumber(context.Background(), until) + require.NoError(t, err) + return ref.Number == until + }, 20, false) + // Ensure that the block isn't available + _, err := verifier.Eng.BlockRefByNumber(context.Background(), l2SafeHead.Number.Uint64()) + require.ErrorIs(t, err, ethereum.NotFound) + + l2ClaimedBlockNumber := l2SafeHead.Number.Uint64() + syncedRollupClient := env.Sequencer.RollupClient() + l2PreBlockNum := l2ClaimedBlockNumber - 1 + preRoot, err := syncedRollupClient.OutputAtBlock(t.Ctx(), l2PreBlockNum) + require.NoError(t, err) + claimRoot, err := syncedRollupClient.OutputAtBlock(t.Ctx(), l2ClaimedBlockNumber) + require.NoError(t, err) + l2Claim := common.Hash(claimRoot.OutputRoot) + l2Head := l2SafeHead.ParentHash + l2AgreedOutputRoot := common.Hash(preRoot.OutputRoot) + chainID := eth.ChainIDFromBig(verifier.RollupCfg.L2ChainID) + + fixtureInputs := &helpers.FixtureInputs{ + L2BlockNumber: l2ClaimedBlockNumber, + L2Claim: l2Claim, + L2Head: l2Head, + L2OutputRoot: l2AgreedOutputRoot, + L2ChainID: chainID, + L1Head: l1Head.Hash(), + AgreedPrestate: nil, // not used for block execution + InteropEnabled: false, + L2Sources: []*helpers.FaultProofProgramL2Source{{ + Node: verifier, + Engine: verifierEngine, + ChainConfig: verifierEngine.L2Chain().Config(), + }}, + } + programCfg := helpers.NewOpProgramCfg(fixtureInputs) + kv := kvstore.NewMemKV() + prefetcher, err := helpers.CreateInprocessPrefetcher( + t, + t.Ctx(), + testlog.Logger(t, log.LevelDebug).New("role", "prefetcher"), + env.Miner, + kv, + programCfg, + fixtureInputs, + ) + require.NoError(t, err) + + oracle := func(key 
preimage.Key) []byte { + value, err := prefetcher.GetPreimage(t.Ctx(), key.PreimageKey()) + require.NoError(t, err) + return value + } + hinter := func(hint preimage.Hint) { + err := prefetcher.Hint(hint.Hint()) + require.NoError(t, err) + } + l2Oracle := l2.NewPreimageOracle(preimage.OracleFn(oracle), preimage.HinterFn(hinter), false) + + block := l2Oracle.BlockDataByHash(l2SafeHead.ParentHash, l2SafeHead.Hash(), chainID) + require.Equal(t, l2SafeHead.Hash(), block.Hash()) + + // It's enough to assert that these functions do not panic + txs := l2Oracle.LoadTransactions(l2SafeHead.Hash(), l2SafeHead.TxHash, chainID) + require.NotNil(t, txs) + _, receipts := l2Oracle.ReceiptsByBlockHash(l2SafeHead.Hash(), chainID) + require.NotNil(t, receipts) +} + +func createVerifier(t actionsHelpers.Testing, env *helpers.L2FaultProofEnv) (*actionsHelpers.L2Verifier, *actionsHelpers.L2Engine) { + logger := testlog.Logger(t, log.LevelInfo) + l1 := env.Miner.L1ClientSimple(t) + blobSrc := env.Miner.BlobStore() + jwtPath := e2eutils.WriteDefaultJWT(t) + engine := actionsHelpers.NewL2Engine(t, logger.New("role", "verifier-2"), env.Sd.L2Cfg, jwtPath) + l2EngineCl, err := sources.NewEngineClient(engine.RPCClient(), logger, nil, sources.EngineClientDefaultConfig(env.Sd.RollupCfg)) + require.NoError(t, err) + return actionsHelpers.NewL2Verifier( + t, + logger.New("role", "verifier-2"), + l1, + blobSrc, + altda.Disabled, + l2EngineCl, + env.Sd.RollupCfg, + &sync.Config{}, + safedb.Disabled, + ), engine +} diff --git a/op-e2e/actions/proofs/helpers/runner.go b/op-e2e/actions/proofs/helpers/runner.go index f75ee98a888..99ae29570d4 100644 --- a/op-e2e/actions/proofs/helpers/runner.go +++ b/op-e2e/actions/proofs/helpers/runner.go @@ -93,20 +93,7 @@ func RunFaultProofProgram(t helpers.Testing, logger log.Logger, l1 *helpers.L1Mi } else { programCfg := NewOpProgramCfg(fixtureInputs) withInProcessPrefetcher := hostcommon.WithPrefetcher(func(ctx context.Context, logger log.Logger, kv kvstore.KV, 
cfg *config.Config) (hostcommon.Prefetcher, error) { - // Set up in-process L1 sources - l1Cl := l1.L1ClientSimple(t) - l1BlobFetcher := l1.BlobSource() - - // Set up in-process L2 source - var rpcClients []client.RPC - for _, source := range fixtureInputs.L2Sources { - rpcClients = append(rpcClients, source.Engine.RPCClient()) - } - sources, err := prefetcher.NewRetryingL2Sources(ctx, logger, programCfg.Rollups, rpcClients, nil) - require.NoError(t, err, "failed to create L2 client") - - executor := host.MakeProgramExecutor(logger, programCfg) - return prefetcher.NewPrefetcher(logger, l1Cl, l1BlobFetcher, fixtureInputs.L2ChainID, sources, kv, executor, cfg.L2Head, cfg.AgreedPrestate), nil + return CreateInprocessPrefetcher(t, ctx, logger, l1, kv, cfg, fixtureInputs) }) ctx, cancel := context.WithTimeout(t.Ctx(), 2*time.Minute) defer cancel() @@ -114,3 +101,28 @@ func RunFaultProofProgram(t helpers.Testing, logger log.Logger, l1 *helpers.L1Mi checkResult(t, err) } } + +func CreateInprocessPrefetcher( + t helpers.Testing, + ctx context.Context, + logger log.Logger, + l1 *helpers.L1Miner, + kv kvstore.KV, + cfg *config.Config, + fixtureInputs *FixtureInputs, +) (hostcommon.Prefetcher, error) { + // Set up in-process L1 sources + l1Cl := l1.L1ClientSimple(t) + l1BlobFetcher := l1.BlobSource() + + // Set up in-process L2 source + var rpcClients []client.RPC + for _, source := range fixtureInputs.L2Sources { + rpcClients = append(rpcClients, source.Engine.RPCClient()) + } + sources, err := prefetcher.NewRetryingL2Sources(ctx, logger, cfg.Rollups, rpcClients, nil) + require.NoError(t, err, "failed to create L2 client") + + executor := host.MakeProgramExecutor(logger, cfg) + return prefetcher.NewPrefetcher(logger, l1Cl, l1BlobFetcher, fixtureInputs.L2ChainID, sources, kv, executor, cfg.L2Head, cfg.AgreedPrestate), nil +} diff --git a/op-e2e/actions/sync/sync_test.go b/op-e2e/actions/sync/sync_test.go index 862ea98fdfa..54cb72a0114 100644 --- 
a/op-e2e/actions/sync/sync_test.go +++ b/op-e2e/actions/sync/sync_test.go @@ -956,7 +956,7 @@ func TestInvalidPayloadInSpanBatch(gt *testing.T) { aliceNonce, err := seqEng.EthClient().PendingNonceAt(t.Ctx(), dp.Addresses.Alice) require.NoError(t, err) data := make([]byte, rand.Intn(100)) - gas, err := core.IntrinsicGas(data, nil, false, true, true, false) + gas, err := core.IntrinsicGas(data, nil, nil, false, true, true, false) require.NoError(t, err) baseFee := seqEng.L2Chain().CurrentBlock().BaseFee tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{ diff --git a/op-e2e/actions/upgrades/span_batch_test.go b/op-e2e/actions/upgrades/span_batch_test.go index 0588128b7db..c37f41311de 100644 --- a/op-e2e/actions/upgrades/span_batch_test.go +++ b/op-e2e/actions/upgrades/span_batch_test.go @@ -541,7 +541,7 @@ func TestSpanBatchLowThroughputChain(gt *testing.T) { data := make([]byte, rand.Intn(100)) _, err := crand.Read(data[:]) // fill with random bytes require.NoError(t, err) - gas, err := core.IntrinsicGas(data, nil, false, true, true, false) + gas, err := core.IntrinsicGas(data, nil, nil, false, true, true, false) require.NoError(t, err) baseFee := seqEngine.L2Chain().CurrentBlock().BaseFee nonce, err := cl.PendingNonceAt(t.Ctx(), addrs[userIdx]) @@ -681,7 +681,7 @@ func TestBatchEquivalence(gt *testing.T) { data := make([]byte, rand.Intn(100)) _, err := crand.Read(data[:]) // fill with random bytes require.NoError(t, err) - gas, err := core.IntrinsicGas(data, nil, false, true, true, false) + gas, err := core.IntrinsicGas(data, nil, nil, false, true, true, false) require.NoError(t, err) baseFee := seqEngine.L2Chain().CurrentBlock().BaseFee nonce, err := seqEngCl.PendingNonceAt(t.Ctx(), addrs[userIdx]) diff --git a/op-e2e/config/init.go b/op-e2e/config/init.go index dcfa841ad75..ceef191f921 100644 --- a/op-e2e/config/init.go +++ b/op-e2e/config/init.go @@ -530,7 +530,7 @@ func cannonPrestate(monorepoRoot string, allocType AllocType) common.Hash { 
once = &cannonPrestateSTOnce cacheVar = &cannonPrestateST } else { - filename = "prestate-proof-mt.json" + filename = "prestate-proof-mt64.json" once = &cannonPrestateMTOnce cacheVar = &cannonPrestateMT } diff --git a/op-e2e/e2eutils/challenger/helper.go b/op-e2e/e2eutils/challenger/helper.go index 3038c02822e..f1559127749 100644 --- a/op-e2e/e2eutils/challenger/helper.go +++ b/op-e2e/e2eutils/challenger/helper.go @@ -126,8 +126,8 @@ func applyCannonConfig(c *config.Config, t *testing.T, rollupCfg *rollup.Config, c.Cannon.VmBin = root + "cannon/bin/cannon" c.Cannon.Server = root + "op-program/bin/op-program" if allocType == e2econfig.AllocTypeMTCannon { - t.Log("Using MT-Cannon absolute prestate") - c.CannonAbsolutePreState = root + "op-program/bin/prestate-mt.bin.gz" + t.Log("Using Cannon64 absolute prestate") + c.CannonAbsolutePreState = root + "op-program/bin/prestate-mt64.bin.gz" } else { c.CannonAbsolutePreState = root + "op-program/bin/prestate.bin.gz" } diff --git a/op-e2e/e2eutils/geth/fakepos.go b/op-e2e/e2eutils/geth/fakepos.go index 10914719814..a7490a065a3 100644 --- a/op-e2e/e2eutils/geth/fakepos.go +++ b/op-e2e/e2eutils/geth/fakepos.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" @@ -105,6 +106,7 @@ func (f *fakePoS) Start() error { } parentBeaconBlockRoot := f.FakeBeaconBlockRoot(head.Time) // parent beacon block root isCancun := f.eth.BlockChain().Config().IsCancun(new(big.Int).SetUint64(head.Number.Uint64()+1), newBlockTime) + isPrague := f.eth.BlockChain().Config().IsPrague(new(big.Int).SetUint64(head.Number.Uint64()+1), newBlockTime) if isCancun { attrs.BeaconRoot = &parentBeaconBlockRoot } @@ -139,7 +141,9 @@ func (f *fakePoS) Start() error { return nil } var envelope 
*engine.ExecutionPayloadEnvelope - if isCancun { + if isPrague { + envelope, err = f.engineAPI.GetPayloadV4(*res.PayloadID) + } else if isCancun { envelope, err = f.engineAPI.GetPayloadV3(*res.PayloadID) } else { envelope, err = f.engineAPI.GetPayloadV2(*res.PayloadID) @@ -164,7 +168,9 @@ func (f *fakePoS) Start() error { } } - if isCancun { + if isPrague { + _, err = f.engineAPI.NewPayloadV4(*envelope.ExecutionPayload, blobHashes, &parentBeaconBlockRoot, make([]hexutil.Bytes, 0)) + } else if isCancun { _, err = f.engineAPI.NewPayloadV3(*envelope.ExecutionPayload, blobHashes, &parentBeaconBlockRoot) } else { _, err = f.engineAPI.NewPayloadV2(*envelope.ExecutionPayload) diff --git a/op-e2e/system/da/brotli_batcher_test.go b/op-e2e/system/da/brotli_batcher_test.go index fd44c6365ea..3bc9b4b5dd9 100644 --- a/op-e2e/system/da/brotli_batcher_test.go +++ b/op-e2e/system/da/brotli_batcher_test.go @@ -86,7 +86,7 @@ func TestBrotliBatcherFjord(t *testing.T) { opts.Value = big.NewInt(1_000_000_000) opts.Nonce = 1 // Already have deposit opts.ToAddr = &common.Address{0xff, 0xff} - opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false) + opts.Gas, err = core.IntrinsicGas(opts.Data, nil, nil, false, true, true, false) require.NoError(t, err) opts.VerifyOnClients(l2Verif) }) diff --git a/op-e2e/system/da/eip4844_test.go b/op-e2e/system/da/eip4844_test.go index 3ef45788809..7c5b0c89d06 100644 --- a/op-e2e/system/da/eip4844_test.go +++ b/op-e2e/system/da/eip4844_test.go @@ -10,6 +10,7 @@ import ( op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + "github.com/ethereum-optimism/optimism/op-e2e/bindings" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" @@ -17,7 +18,6 @@ import ( "github.com/stretchr/testify/require" batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags" - "github.com/ethereum-optimism/optimism/op-e2e/bindings" gethutils 
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" @@ -25,6 +25,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -138,7 +139,7 @@ func testSystem4844E2E(t *testing.T, multiBlob bool, daType batcherFlags.DataAva opts.ToAddr = &common.Address{0xff, 0xff} // put some random data in the tx to make it fill up eth.MaxBlobsPerBlobTx blobs (multi-blob case) opts.Data = testutils.RandomData(rand.New(rand.NewSource(420)), 400) - opts.Gas, err = core.IntrinsicGas(opts.Data, nil, false, true, true, false) + opts.Gas, err = core.IntrinsicGas(opts.Data, nil, nil, false, true, true, false) require.NoError(t, err) opts.VerifyOnClients(l2Verif) }) @@ -251,56 +252,46 @@ func toIndexedBlobHashes(hs ...common.Hash) []eth.IndexedBlobHash { // gas price. The L1 blob gas limit is set to a low value to speed up this process. func TestBatcherAutoDA(t *testing.T) { op_e2e.InitParallel(t) + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() - cfg := e2esys.EcotoneSystemConfig(t, new(hexutil.Uint64)) + // System setup + cfg := e2esys.HoloceneSystemConfig(t, new(hexutil.Uint64)) + cfg.DeployConfig.L1PragueTimeOffset = new(hexutil.Uint64) // activate prague to get higher calldata cost cfg.DataAvailabilityType = batcherFlags.AutoType // We set the genesis fee values and block gas limit such that calldata txs are initially cheaper, - // but then drive up the base fee over the coming L1 blocks such that blobs become cheaper again. 
- cfg.DeployConfig.L1GenesisBlockBaseFeePerGas = (*hexutil.Big)(big.NewInt(7500)) - // 100 blob targets leads to 130_393 starting blob base fee, which is ~ 16 * 8_150 + // but then manipulate the fee markets over the coming L1 blocks such that blobs become cheaper again. + cfg.DeployConfig.L1GenesisBlockBaseFeePerGas = (*hexutil.Big)(big.NewInt(3100)) + // 100 blob targets leads to 130_393 starting blob base fee, which is ~ 42 * 2_000 (equilibrium is ~16x or ~40x under Pectra) cfg.DeployConfig.L1GenesisBlockExcessBlobGas = (*hexutil.Uint64)(u64Ptr(100 * params.BlobTxTargetBlobGasPerBlock)) cfg.DeployConfig.L1GenesisBlockBlobGasUsed = (*hexutil.Uint64)(u64Ptr(0)) - cfg.DeployConfig.L1GenesisBlockGasLimit = 2_500_000 // low block gas limit to drive up gas price more quickly - t.Logf("L1BlockTime: %d, L2BlockTime: %d", cfg.DeployConfig.L1BlockTime, cfg.DeployConfig.L2BlockTime) - + cfg.DeployConfig.L1GenesisBlockGasLimit = 2_500_000 cfg.BatcherTargetNumFrames = eth.MaxBlobsPerBlobTx - sys, err := cfg.Start(t) require.NoError(t, err, "Error starting up system") - log := testlog.Logger(t, log.LevelInfo) log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) + // Constants l1Client := sys.NodeClient("l1") - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - ethPrivKey := cfg.Secrets.Alice + depositContract, err := bindings.NewOptimismPortal(cfg.L1Deployments.OptimismPortalProxy, l1Client) + require.NoError(t, err) + depAmount := big.NewInt(1_000_000_000_000) + opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig()) + require.NoError(t, err) + opts.Value = depAmount fromAddr := cfg.Secrets.Addresses().Alice + const numTxs = 25 - // Send deposit transactions in a loop to drive up L1 base fee - depAmount := big.NewInt(1_000_000_000_000) - const numDeps = 3 - txs := make([]*types.Transaction, 0, numDeps) - t.Logf("Sending %d 
deposits...", numDeps) - for i := int64(0); i < numDeps; i++ { - opts, err := bind.NewKeyedTransactorWithChainID(ethPrivKey, cfg.L1ChainIDBig()) + // Helpers + mustGetFees := func() (*big.Int, *big.Int, *big.Int, float64) { + tip, baseFee, blobFee, err := txmgr.DefaultGasPriceEstimatorFn(ctx, l1Client) require.NoError(t, err) - opts.Value = depAmount - opts.Nonce = big.NewInt(i) - depositContract, err := bindings.NewOptimismPortal(cfg.L1Deployments.OptimismPortalProxy, l1Client) - require.NoError(t, err) - - tx, err := transactions.PadGasEstimate(opts, 2, func(opts *bind.TransactOpts) (*types.Transaction, error) { - return depositContract.DepositTransaction(opts, fromAddr, depAmount, 1_000_000, false, nil) - }) - require.NoErrorf(t, err, "failed to send deposit tx[%d]", i) - t.Logf("Deposit submitted[%d]: tx hash: %v", i, tx.Hash()) - txs = append(txs, tx) + feeRatio := float64(blobFee.Int64()) / float64(baseFee.Int64()+tip.Int64()) + t.Logf("L1 fees are: baseFee(%d), tip(%d), blobBaseFee(%d). feeRatio: %f", baseFee, tip, blobFee, feeRatio) + return tip, baseFee, blobFee, feeRatio } - require.Len(t, txs, numDeps) - requireEventualBatcherTxType := func(txType uint8, timeout time.Duration, strict bool) { var foundOtherTxType bool require.Eventually(t, func() bool { @@ -320,17 +311,52 @@ func TestBatcherAutoDA(t *testing.T) { }, timeout, time.Second, "expected batcher tx type didn't arrive") require.False(t, foundOtherTxType, "unexpected batcher tx type found") } + + // Check markets are set up as expected. + _, _, _, feeRatio := mustGetFees() + require.Greater(t, feeRatio, 41.0, "expected feeRatio to be greater than 41 (calldata should be cheaper, even with Pectra)") + + // Market manipulations: + // Send deposit transactions in a loop to shore up L1 base fee + // as blobBaseFee drops (batcher uses calldata initially so the blob market is quiet). 
+ txs := make([]*types.Transaction, 0, numTxs) + t.Logf("Sending %d l1 txs...", numTxs) + for i := int64(0); i < numTxs; i++ { + opts.Nonce = big.NewInt(i) + tx, err := transactions.PadGasEstimate(opts, 2, func(opts *bind.TransactOpts) (*types.Transaction, error) { + return depositContract.DepositTransaction(opts, fromAddr, depAmount, 800_000, false, nil) + }) + require.NoErrorf(t, err, "failed to send deposit tx[%d]", i) + t.Logf("Deposit submitted[%d]: tx hash: %v", i, tx.Hash()) + txs = append(txs, tx) + } + // At this point, we didn't wait on any blocks yet, so we can check that // the first batcher tx used calldata. requireEventualBatcherTxType(types.DynamicFeeTxType, 8*time.Second, true) - t.Logf("Confirming %d deposits on L1...", numDeps) + // Now wait for txs to confirm on L1: + t.Logf("Confirming %d txs on L1...", numTxs) + blockNum := 0 for i, tx := range txs { rec, err := wait.ForReceiptOK(ctx, l1Client, tx.Hash()) - require.NoErrorf(t, err, "Waiting for deposit[%d] tx on L1", i) - t.Logf("Deposit confirmed[%d]: L1 block num: %v, gas used: %d", i, rec.BlockNumber, rec.GasUsed) + require.NoErrorf(t, err, "Waiting for tx[%d] on L1", i) + t.Logf("Tx confirmed[%d]: L1 block num: %v, gas used: %d", i, rec.BlockNumber, rec.GasUsed) + if rec.BlockNumber.Int64() > int64(blockNum) { + blockNum = int(rec.BlockNumber.Int64()) + block, err := l1Client.BlockByNumber(ctx, rec.BlockNumber) + require.NoError(t, err) + t.Logf("gas used %d/%d", block.GasUsed(), block.GasLimit()) + _, _, _, feeRatio = mustGetFees() + if feeRatio < 16.0 { + break + } + } } + // Check we managed to manipulate the markets correctly. + require.Less(t, feeRatio, 16.0, "expected fee ratio to be less than 16 (blobspace should be cheaper, even without Pectra)") + // Now wait for batcher to have switched to blob txs. 
requireEventualBatcherTxType(types.BlobTxType, 8*time.Second, false) } diff --git a/op-e2e/system/gastoken/gastoken_test.go b/op-e2e/system/gastoken/gastoken_test.go deleted file mode 100644 index f9c66d539ec..00000000000 --- a/op-e2e/system/gastoken/gastoken_test.go +++ /dev/null @@ -1,624 +0,0 @@ -package gastoken - -import ( - "context" - "math/big" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-e2e/config" - - op_e2e "github.com/ethereum-optimism/optimism/op-e2e" - - "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" - "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" - - "github.com/ethereum-optimism/optimism/op-e2e/bindings" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/receipts" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-service/predeploys" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// setup expectations using custom gas token -type cgtTestExpectations struct { - tokenAddress common.Address - tokenName string - tokenSymbol string - tokenDecimals uint8 -} - -func TestMain(m *testing.M) { - op_e2e.RunMain(m) -} - -func TestCustomGasToken_L2OO(t *testing.T) { - testCustomGasToken(t, config.AllocTypeL2OO) -} - -func TestCustomGasToken_Standard(t *testing.T) { - t.Skip("Custom gas token not supported") - testCustomGasToken(t, config.AllocTypeStandard) -} - -func testCustomGasToken(t *testing.T, allocType config.AllocType) { - op_e2e.InitParallel(t) - - disabledExpectations := cgtTestExpectations{ - common.HexToAddress("0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE"), - "Ether", - "ETH", - uint8(18), - } - - 
setup := func(t *testing.T) gasTokenTestOpts { - cfg := e2esys.DefaultSystemConfig(t, e2esys.WithAllocType(allocType)) - offset := hexutil.Uint64(0) - cfg.DeployConfig.L2GenesisRegolithTimeOffset = &offset - cfg.DeployConfig.L1CancunTimeOffset = &offset - cfg.DeployConfig.L2GenesisCanyonTimeOffset = &offset - cfg.DeployConfig.L2GenesisDeltaTimeOffset = &offset - cfg.DeployConfig.L2GenesisEcotoneTimeOffset = &offset - - sys, err := cfg.Start(t) - require.NoError(t, err, "Error starting up system") - - l1Client := sys.NodeClient("l1") - aliceOpts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Alice, cfg.L1ChainIDBig()) - require.NoError(t, err) - - // Deploy WETH9, we'll use this as our custom gas token for the purpose of the test - weth9Address, tx, weth9, err := bindings.DeployWETH9(aliceOpts, l1Client) - require.NoError(t, err) - _, err = wait.ForReceiptOK(context.Background(), l1Client, tx.Hash()) - require.NoError(t, err) - - enabledExpectations := cgtTestExpectations{} - enabledExpectations.tokenAddress = weth9Address - enabledExpectations.tokenName, err = weth9.Name(&bind.CallOpts{}) - require.NoError(t, err) - enabledExpectations.tokenSymbol, err = weth9.Symbol(&bind.CallOpts{}) - require.NoError(t, err) - enabledExpectations.tokenDecimals, err = weth9.Decimals(&bind.CallOpts{}) - require.NoError(t, err) - - // Get some WETH - aliceOpts.Value = big.NewInt(10_000_000) - tx, err = weth9.Deposit(aliceOpts) - waitForTx(t, tx, err, l1Client) - aliceOpts.Value = nil - newBalance, err := weth9.BalanceOf(&bind.CallOpts{}, aliceOpts.From) - require.NoError(t, err) - require.Equal(t, newBalance, big.NewInt(10_000_000)) - - return gasTokenTestOpts{ - aliceOpts: aliceOpts, - cfg: cfg, - weth9: weth9, - weth9Address: weth9Address, - allocType: allocType, - sys: sys, - enabledExpectations: enabledExpectations, - disabledExpectations: disabledExpectations, - } - } - - t.Run("deposit", func(t *testing.T) { - op_e2e.InitParallel(t) - gto := setup(t) - checkDeposit(t, 
gto, false) - setCustomGasToken(t, gto.cfg, gto.sys, gto.weth9Address, allocType) - checkDeposit(t, gto, true) - }) - - t.Run("withdrawal", func(t *testing.T) { - op_e2e.InitParallel(t) - gto := setup(t) - setCustomGasToken(t, gto.cfg, gto.sys, gto.weth9Address, allocType) - checkDeposit(t, gto, true) - checkWithdrawal(t, gto) - }) - - t.Run("fee withdrawal", func(t *testing.T) { - op_e2e.InitParallel(t) - gto := setup(t) - setCustomGasToken(t, gto.cfg, gto.sys, gto.weth9Address, allocType) - checkDeposit(t, gto, true) - checkFeeWithdrawal(t, gto, true) - }) - - t.Run("token name and symbol", func(t *testing.T) { - op_e2e.InitParallel(t) - gto := setup(t) - checkL1TokenNameAndSymbol(t, gto, gto.disabledExpectations) - checkL2TokenNameAndSymbol(t, gto, gto.disabledExpectations) - checkWETHTokenNameAndSymbol(t, gto, gto.disabledExpectations) - setCustomGasToken(t, gto.cfg, gto.sys, gto.weth9Address, allocType) - checkL1TokenNameAndSymbol(t, gto, gto.enabledExpectations) - checkL2TokenNameAndSymbol(t, gto, gto.enabledExpectations) - checkWETHTokenNameAndSymbol(t, gto, gto.enabledExpectations) - }) -} - -// setCustomGasToken enables the Custom Gas Token feature on a chain where it wasn't enabled at genesis. -// It reads existing parameters from the SystemConfig contract, inserts the supplied cgtAddress and reinitializes that contract. -// To do this it uses the ProxyAdmin and StorageSetter from the supplied cfg. 
-func setCustomGasToken(t *testing.T, cfg e2esys.SystemConfig, sys *e2esys.System, cgtAddress common.Address, allocType config.AllocType) { - l1Client := sys.NodeClient("l1") - deployerOpts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Deployer, cfg.L1ChainIDBig()) - require.NoError(t, err) - - // Bind a SystemConfig at the SystemConfigProxy address - systemConfig, err := bindings.NewSystemConfig(cfg.L1Deployments.SystemConfigProxy, l1Client) - require.NoError(t, err) - - // Get existing parameters from SystemConfigProxy contract - owner, err := systemConfig.Owner(&bind.CallOpts{}) - require.NoError(t, err) - basefeeScalar, err := systemConfig.BasefeeScalar(&bind.CallOpts{}) - require.NoError(t, err) - blobbasefeeScalar, err := systemConfig.BlobbasefeeScalar(&bind.CallOpts{}) - require.NoError(t, err) - batcherHash, err := systemConfig.BatcherHash(&bind.CallOpts{}) - require.NoError(t, err) - gasLimit, err := systemConfig.GasLimit(&bind.CallOpts{}) - require.NoError(t, err) - unsafeBlockSigner, err := systemConfig.UnsafeBlockSigner(&bind.CallOpts{}) - require.NoError(t, err) - resourceConfig, err := systemConfig.ResourceConfig(&bind.CallOpts{}) - require.NoError(t, err) - batchInbox, err := systemConfig.BatchInbox(&bind.CallOpts{}) - require.NoError(t, err) - addresses := bindings.SystemConfigAddresses{} - addresses.L1CrossDomainMessenger, err = systemConfig.L1CrossDomainMessenger(&bind.CallOpts{}) - require.NoError(t, err) - addresses.L1ERC721Bridge, err = systemConfig.L1ERC721Bridge(&bind.CallOpts{}) - require.NoError(t, err) - addresses.L1StandardBridge, err = systemConfig.L1StandardBridge(&bind.CallOpts{}) - require.NoError(t, err) - addresses.DisputeGameFactory, err = systemConfig.DisputeGameFactory(&bind.CallOpts{}) - require.NoError(t, err) - addresses.OptimismPortal, err = systemConfig.OptimismPortal(&bind.CallOpts{}) - require.NoError(t, err) - addresses.OptimismMintableERC20Factory, err = 
systemConfig.OptimismMintableERC20Factory(&bind.CallOpts{}) - require.NoError(t, err) - - // Queue up custom gas token address ready for reinitialization - addresses.GasPayingToken = cgtAddress - - // Bind a ProxyAdmin to the ProxyAdmin address - proxyAdmin, err := bindings.NewProxyAdmin(cfg.L1Deployments.ProxyAdmin, l1Client) - require.NoError(t, err) - - // Deploy a new StorageSetter contract - storageSetterAddr, tx, _, err := bindings.DeployStorageSetter(deployerOpts, l1Client) - waitForTx(t, tx, err, l1Client) - - // Set up a signer which controls the Proxy Admin. - // The deploy config's finalSystemOwner is the owner of the ProxyAdmin as well as the SystemConfig, - // so we can use that address for the proxy admin owner. - ownerSecret := cfg.Secrets.Deployer - if allocType == config.AllocTypeL2OO { - ownerSecret = cfg.Secrets.SysCfgOwner - } - proxyAdminOwnerOpts, err := bind.NewKeyedTransactorWithChainID(ownerSecret, cfg.L1ChainIDBig()) - require.NoError(t, err) - - // Execute the upgrade SystemConfigProxy -> StorageSetter via ProxyAdmin - tx, err = proxyAdmin.Upgrade(proxyAdminOwnerOpts, cfg.L1Deployments.SystemConfigProxy, storageSetterAddr) - waitForTx(t, tx, err, l1Client) - - // Bind a StorageSetter to the SystemConfigProxy address - storageSetter, err := bindings.NewStorageSetter(cfg.L1Deployments.SystemConfigProxy, l1Client) - require.NoError(t, err) - - // Use StorageSetter to clear out "initialize" slot - tx, err = storageSetter.SetBytes320(deployerOpts, [32]byte{0}, [32]byte{0}) - waitForTx(t, tx, err, l1Client) - - // Sanity check previous step worked - currentSlotValue, err := storageSetter.GetBytes32(&bind.CallOpts{}, [32]byte{0}) - require.NoError(t, err) - require.Equal(t, currentSlotValue, [32]byte{0}) - - // Execute SystemConfigProxy -> SystemConfig upgrade - tx, err = proxyAdmin.Upgrade(proxyAdminOwnerOpts, cfg.L1Deployments.SystemConfigProxy, cfg.L1Deployments.SystemConfig) - waitForTx(t, tx, err, l1Client) - - // Reinitialise with existing 
initializer values but with custom gas token set - tx, err = systemConfig.Initialize(deployerOpts, owner, - basefeeScalar, - blobbasefeeScalar, - batcherHash, - gasLimit, - unsafeBlockSigner, - resourceConfig, - batchInbox, - addresses) - require.NoError(t, err) - receipt, err := wait.ForReceiptOK(context.Background(), l1Client, tx.Hash()) - require.NoError(t, err) - - // Read Custom Gas Token and check it has been set properly - gpt, err := systemConfig.GasPayingToken(&bind.CallOpts{}) - require.NoError(t, err) - require.Equal(t, cgtAddress, gpt.Addr) - - optimismPortal, err := bindings.NewOptimismPortal(cfg.L1Deployments.OptimismPortalProxy, l1Client) - require.NoError(t, err) - - depositEvent, err := receipts.FindLog(receipt.Logs, optimismPortal.ParseTransactionDeposited) - require.NoError(t, err, "Should emit deposit event") - depositTx, err := derive.UnmarshalDepositLogEvent(&depositEvent.Raw) - - require.NoError(t, err) - l2Client := sys.NodeClient("sequencer") - receipt, err = wait.ForReceiptOK(context.Background(), l2Client, types.NewTx(depositTx).Hash()) - require.NoError(t, err) - - l1Block, err := bindings.NewL1Block(predeploys.L1BlockAddr, l2Client) - require.NoError(t, err) - _, err = receipts.FindLog(receipt.Logs, l1Block.ParseGasPayingTokenSet) - require.NoError(t, err) -} - -// waitForTx is a thing wrapper around wait.ForReceiptOK which asserts on there being no errors. 
-func waitForTx(t *testing.T, tx *types.Transaction, err error, client *ethclient.Client) { - require.NoError(t, err) - _, err = wait.ForReceiptOK(context.Background(), client, tx.Hash()) - require.NoError(t, err) -} - -type gasTokenTestOpts struct { - aliceOpts *bind.TransactOpts - cfg e2esys.SystemConfig - weth9 *bindings.WETH9 - weth9Address common.Address - allocType config.AllocType - sys *e2esys.System - enabledExpectations cgtTestExpectations - disabledExpectations cgtTestExpectations -} - -// Function to prepare and make call to depositERC20Transaction and make -// appropriate assertions dependent on whether custom gas tokens have been enabled or not. -func checkDeposit(t *testing.T, gto gasTokenTestOpts, enabled bool) { - aliceOpts := gto.aliceOpts - cfg := gto.cfg - l1Client := gto.sys.NodeClient("l1") - l2Client := gto.sys.NodeClient("sequencer") - weth9 := gto.weth9 - - // Set amount of WETH9 to bridge to the recipient on L2 - amountToBridge := big.NewInt(10) - recipient := common.HexToAddress("0xbeefdead") - - // Approve OptimismPortal - tx, err := weth9.Approve(aliceOpts, cfg.L1Deployments.OptimismPortalProxy, amountToBridge) - waitForTx(t, tx, err, l1Client) - - // Get recipient L2 balance before bridging - previousL2Balance, err := l2Client.BalanceAt(context.Background(), recipient, nil) - require.NoError(t, err) - - // Bridge the tokens - optimismPortal, err := bindings.NewOptimismPortal(cfg.L1Deployments.OptimismPortalProxy, l1Client) - require.NoError(t, err) - tx, err = optimismPortal.DepositERC20Transaction(aliceOpts, - recipient, - amountToBridge, - amountToBridge, - 50_0000, // _gasLimit - false, - []byte{}, - ) - if enabled { - require.NoError(t, err) - receipt, err := wait.ForReceiptOK(context.Background(), l1Client, tx.Hash()) - require.NoError(t, err) - - // compute the deposit transaction hash + poll for it - depositEvent, err := receipts.FindLog(receipt.Logs, optimismPortal.ParseTransactionDeposited) - require.NoError(t, err, "Should 
emit deposit event") - depositTx, err := derive.UnmarshalDepositLogEvent(&depositEvent.Raw) - require.NoError(t, err) - _, err = wait.ForReceiptOK(context.Background(), l2Client, types.NewTx(depositTx).Hash()) - require.NoError(t, err) - - require.EventuallyWithT(t, func(t *assert.CollectT) { - // check for balance increase on L2 - newL2Balance, err := l2Client.BalanceAt(context.Background(), recipient, nil) - require.NoError(t, err) - l2BalanceIncrease := big.NewInt(0).Sub(newL2Balance, previousL2Balance) - require.Equal(t, amountToBridge, l2BalanceIncrease) - }, 10*time.Second, 1*time.Second) - } else { - require.Error(t, err) - } -} - -// Function to prepare and execute withdrawal flow for CGTs -// and assert token balance is increased on L1. -func checkWithdrawal(t *testing.T, gto gasTokenTestOpts) { - aliceOpts := gto.aliceOpts - cfg := gto.cfg - weth9 := gto.weth9 - allocType := gto.allocType - l1Client := gto.sys.NodeClient("l1") - l2Seq := gto.sys.NodeClient("sequencer") - l2Verif := gto.sys.NodeClient("verifier") - fromAddr := aliceOpts.From - ethPrivKey := cfg.Secrets.Alice - - // Start L2 balance for withdrawal - startBalanceBeforeWithdrawal, err := l2Seq.BalanceAt(context.Background(), fromAddr, nil) - require.NoError(t, err) - - withdrawAmount := big.NewInt(5) - tx, receipt := helpers.SendWithdrawal(t, cfg, l2Seq, cfg.Secrets.Alice, func(opts *helpers.WithdrawalTxOpts) { - opts.Value = withdrawAmount - opts.VerifyOnClients(l2Verif) - }) - - // Verify L2 balance after withdrawal - header, err := l2Verif.HeaderByNumber(context.Background(), receipt.BlockNumber) - require.NoError(t, err) - - endBalanceAfterWithdrawal, err := wait.ForBalanceChange(context.Background(), l2Seq, fromAddr, startBalanceBeforeWithdrawal) - require.NoError(t, err) - - // Take fee into account - diff := new(big.Int).Sub(startBalanceBeforeWithdrawal, endBalanceAfterWithdrawal) - fees := helpers.CalcGasFees(receipt.GasUsed, tx.GasTipCap(), tx.GasFeeCap(), header.BaseFee) - fees = 
fees.Add(fees, receipt.L1Fee) - diff = diff.Sub(diff, fees) - require.Equal(t, withdrawAmount, diff) - - // Take start token balance on L1 - startTokenBalanceBeforeFinalize, err := weth9.BalanceOf(&bind.CallOpts{}, fromAddr) - require.NoError(t, err) - - startETHBalanceBeforeFinalize, err := l1Client.BalanceAt(context.Background(), fromAddr, nil) - require.NoError(t, err) - - proveReceipt, finalizeReceipt, resolveClaimReceipt, resolveReceipt := helpers.ProveAndFinalizeWithdrawal(t, cfg, gto.sys, "verifier", ethPrivKey, receipt) - - // Verify L1 ETH balance change - proveFee := new(big.Int).Mul(new(big.Int).SetUint64(proveReceipt.GasUsed), proveReceipt.EffectiveGasPrice) - finalizeFee := new(big.Int).Mul(new(big.Int).SetUint64(finalizeReceipt.GasUsed), finalizeReceipt.EffectiveGasPrice) - fees = new(big.Int).Add(proveFee, finalizeFee) - if allocType.UsesProofs() { - resolveClaimFee := new(big.Int).Mul(new(big.Int).SetUint64(resolveClaimReceipt.GasUsed), resolveClaimReceipt.EffectiveGasPrice) - resolveFee := new(big.Int).Mul(new(big.Int).SetUint64(resolveReceipt.GasUsed), resolveReceipt.EffectiveGasPrice) - fees = new(big.Int).Add(fees, resolveClaimFee) - fees = new(big.Int).Add(fees, resolveFee) - } - - // Verify L1ETHBalance after withdrawal - // On CGT chains, the only change in ETH balance from a withdrawal - // is a decrease to pay for gas - endETHBalanceAfterFinalize, err := l1Client.BalanceAt(context.Background(), fromAddr, nil) - require.NoError(t, err) - diff = new(big.Int).Sub(endETHBalanceAfterFinalize, startETHBalanceBeforeFinalize) - require.Equal(t, new(big.Int).Sub(big.NewInt(0), fees), diff) - - // Verify token balance after withdrawal - // L1 Fees are paid in ETH, and - // withdrawal is of a Custom Gas Token, so we do not subtract l1 fees from expected balance change - // as we would if ETH was the gas paying token - endTokenBalanceAfterFinalize, err := weth9.BalanceOf(&bind.CallOpts{}, fromAddr) - require.NoError(t, err) - diff = 
new(big.Int).Sub(endTokenBalanceAfterFinalize, startTokenBalanceBeforeFinalize) - require.Equal(t, withdrawAmount, diff) -} - -// checkFeeWithdrawal ensures that the FeeVault can be withdrawn from -func checkFeeWithdrawal(t *testing.T, gto gasTokenTestOpts, enabled bool) { - cfg := gto.cfg - weth9 := gto.weth9 - allocType := gto.allocType - l1Client := gto.sys.NodeClient("l1") - l2Client := gto.sys.NodeClient("sequencer") - - feeVault, err := bindings.NewSequencerFeeVault(predeploys.SequencerFeeVaultAddr, l2Client) - require.NoError(t, err) - - // Alice will be sending transactions - aliceOpts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Alice, cfg.L2ChainIDBig()) - require.NoError(t, err) - - // Get the recipient of the funds - recipient, err := feeVault.RECIPIENT(&bind.CallOpts{}) - require.NoError(t, err) - - // This test depends on the withdrawal network being L1 which is represented - // by 0 in the enum. - withdrawalNetwork, err := feeVault.WITHDRAWALNETWORK(&bind.CallOpts{}) - require.NoError(t, err) - require.Equal(t, withdrawalNetwork, uint8(0)) - - // Get the balance of the recipient on L1 - var recipientBalanceBefore *big.Int - if enabled { - recipientBalanceBefore, err = weth9.BalanceOf(&bind.CallOpts{}, recipient) - } else { - recipientBalanceBefore, err = l1Client.BalanceAt(context.Background(), recipient, nil) - } - require.NoError(t, err) - - // Get the min withdrawal amount for the FeeVault - amount, err := feeVault.MINWITHDRAWALAMOUNT(&bind.CallOpts{}) - require.NoError(t, err) - - l1opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Alice, cfg.L1ChainIDBig()) - require.NoError(t, err) - - optimismPortal, err := bindings.NewOptimismPortal(cfg.L1Deployments.OptimismPortalProxy, l1Client) - require.NoError(t, err) - - depositAmount := new(big.Int).Mul(amount, big.NewInt(14)) - l1opts.Value = depositAmount - - var receipt *types.Receipt - - // Alice deposits funds - if enabled { - // approve + transferFrom flow - // Cannot use 
`transfer` because of the tracking of balance in the OptimismPortal - dep, err := weth9.Deposit(l1opts) - waitForTx(t, dep, err, l1Client) - - l1opts.Value = nil - tx, err := weth9.Approve(l1opts, cfg.L1Deployments.OptimismPortalProxy, depositAmount) - waitForTx(t, tx, err, l1Client) - - require.NoError(t, err) - deposit, err := optimismPortal.DepositERC20Transaction(l1opts, cfg.Secrets.Addresses().Alice, depositAmount, depositAmount, 500_000, false, []byte{}) - waitForTx(t, deposit, err, l1Client) - - receipt, err = wait.ForReceiptOK(context.Background(), l1Client, deposit.Hash()) - require.NoError(t, err) - } else { - // send ether to the portal directly, alice already has funds on L2 - tx, err := optimismPortal.DepositTransaction(l1opts, cfg.Secrets.Addresses().Alice, depositAmount, 500_000, false, []byte{}) - waitForTx(t, tx, err, l1Client) - - receipt, err = wait.ForReceiptOK(context.Background(), l1Client, tx.Hash()) - require.NoError(t, err) - } - - // Compute the deposit transaction hash + poll for it - depositEvent, err := receipts.FindLog(receipt.Logs, optimismPortal.ParseTransactionDeposited) - require.NoError(t, err, "Should emit deposit event") - depositTx, err := derive.UnmarshalDepositLogEvent(&depositEvent.Raw) - require.NoError(t, err) - _, err = wait.ForReceiptOK(context.Background(), l2Client, types.NewTx(depositTx).Hash()) - require.NoError(t, err) - - // Get Alice's balance on L2 - aliceBalance, err := l2Client.BalanceAt(context.Background(), cfg.Secrets.Addresses().Alice, nil) - require.NoError(t, err) - require.GreaterOrEqual(t, aliceBalance.Uint64(), amount.Uint64()) - - // Send funds to the FeeVault so its balance is above the min withdrawal amount - aliceOpts.Value = amount - feeVaultTx, err := feeVault.Receive(aliceOpts) - waitForTx(t, feeVaultTx, err, l2Client) - - // Ensure that the balance of the vault is large enough to withdraw - vaultBalance, err := l2Client.BalanceAt(context.Background(), predeploys.SequencerFeeVaultAddr, nil) - 
require.NoError(t, err) - require.GreaterOrEqual(t, vaultBalance.Uint64(), amount.Uint64()) - - // Ensure there is code at the vault address - code, err := l2Client.CodeAt(context.Background(), predeploys.SequencerFeeVaultAddr, nil) - require.NoError(t, err) - require.NotEmpty(t, code) - - // Poke the fee vault to withdraw - l2Opts, err := bind.NewKeyedTransactorWithChainID(cfg.Secrets.Bob, cfg.L2ChainIDBig()) - require.NoError(t, err) - withdrawalTx, err := feeVault.Withdraw(l2Opts) - waitForTx(t, withdrawalTx, err, l2Client) - - // Get the receipt and the amount withdrawn - receipt, err = l2Client.TransactionReceipt(context.Background(), withdrawalTx.Hash()) - require.NoError(t, err) - - inclusionHeight := receipt.BlockNumber.Uint64() - it, err := feeVault.FilterWithdrawal(&bind.FilterOpts{ - Start: inclusionHeight, - End: &inclusionHeight, - }) - require.NoError(t, err) - require.True(t, it.Next()) - - withdrawnAmount := it.Event.Value - - // Finalize the withdrawal - proveReceipt, finalizeReceipt, resolveClaimReceipt, resolveReceipt := helpers.ProveAndFinalizeWithdrawal(t, cfg, gto.sys, "verifier", cfg.Secrets.Alice, receipt) - require.Equal(t, types.ReceiptStatusSuccessful, proveReceipt.Status) - require.Equal(t, types.ReceiptStatusSuccessful, finalizeReceipt.Status) - if allocType.UsesProofs() { - require.Equal(t, types.ReceiptStatusSuccessful, resolveClaimReceipt.Status) - require.Equal(t, types.ReceiptStatusSuccessful, resolveReceipt.Status) - } - - // Assert that the recipient's balance did increase - var recipientBalanceAfter *big.Int - if enabled { - recipientBalanceAfter, err = weth9.BalanceOf(&bind.CallOpts{}, recipient) - } else { - recipientBalanceAfter, err = l1Client.BalanceAt(context.Background(), recipient, nil) - } - require.NoError(t, err) - - require.Equal(t, recipientBalanceAfter, new(big.Int).Add(recipientBalanceBefore, withdrawnAmount)) -} - -func checkL1TokenNameAndSymbol(t *testing.T, gto gasTokenTestOpts, expectations 
cgtTestExpectations) { - l1Client := gto.sys.NodeClient("l1") - cfg := gto.cfg - - systemConfig, err := bindings.NewSystemConfig(cfg.L1Deployments.SystemConfigProxy, l1Client) - require.NoError(t, err) - - token, err := systemConfig.GasPayingToken(&bind.CallOpts{}) - require.NoError(t, err) - - name, err := systemConfig.GasPayingTokenName(&bind.CallOpts{}) - require.NoError(t, err) - - symbol, err := systemConfig.GasPayingTokenSymbol(&bind.CallOpts{}) - require.NoError(t, err) - - require.Equal(t, expectations.tokenAddress, token.Addr) - require.Equal(t, expectations.tokenDecimals, token.Decimals) - require.Equal(t, expectations.tokenName, name) - require.Equal(t, expectations.tokenSymbol, symbol) -} - -func checkL2TokenNameAndSymbol(t *testing.T, gto gasTokenTestOpts, enabledExpectations cgtTestExpectations) { - l2Client := gto.sys.NodeClient("sequencer") - - l1Block, err := bindings.NewL1Block(predeploys.L1BlockAddr, l2Client) - require.NoError(t, err) - - token, err := l1Block.GasPayingToken(&bind.CallOpts{}) - require.NoError(t, err) - - name, err := l1Block.GasPayingTokenName(&bind.CallOpts{}) - require.NoError(t, err) - - symbol, err := l1Block.GasPayingTokenSymbol(&bind.CallOpts{}) - require.NoError(t, err) - - require.Equal(t, enabledExpectations.tokenAddress, token.Addr) - require.Equal(t, enabledExpectations.tokenDecimals, token.Decimals) - require.Equal(t, enabledExpectations.tokenName, name) - require.Equal(t, enabledExpectations.tokenSymbol, symbol) -} - -func checkWETHTokenNameAndSymbol(t *testing.T, gto gasTokenTestOpts, expectations cgtTestExpectations) { - l2Client := gto.sys.NodeClient("sequencer") - - // Check name and symbol in WETH predeploy - weth, err := bindings.NewWETH(predeploys.WETHAddr, l2Client) - require.NoError(t, err) - - name, err := weth.Name(&bind.CallOpts{}) - require.NoError(t, err) - - symbol, err := weth.Symbol(&bind.CallOpts{}) - require.NoError(t, err) - - require.Equal(t, "Wrapped "+expectations.tokenName, name) - 
require.Equal(t, "W"+expectations.tokenSymbol, symbol) -} diff --git a/op-node/flags/flags.go b/op-node/flags/flags.go index b2835da78cb..e8694e0476c 100644 --- a/op-node/flags/flags.go +++ b/op-node/flags/flags.go @@ -182,8 +182,13 @@ var ( L1CacheSize = &cli.UintFlag{ Name: "l1.cache-size", Usage: "Cache size for blocks, receipts and transactions. " + - "It's optional and a sane default of 3/2 the sequencing window size is used if this field is set to 0.", + "If this flag is set to 0, 2/3 of the sequencing window size is used (usually 2400). " + + "The default value of 900 (~3h of L1 blocks) is good for (high-throughput) networks that see frequent safe head increments. " + + "On (low-throughput) networks with infrequent safe head increments, it is recommended to set this value to 0, " + + "or a value that well covers the typical span between safe head increments. " + + "Note that higher values will cause significantly increased memory usage.", EnvVars: prefixEnvVars("L1_CACHE_SIZE"), + Value: 900, // ~3h of L1 blocks Category: L1RPCCategory, } L1HTTPPollInterval = &cli.DurationFlag{ @@ -204,6 +209,13 @@ var ( }(), Category: RollupCategory, } + L2EngineRpcTimeout = &cli.DurationFlag{ + Name: "l2.engine-rpc-timeout", + Usage: "L2 engine client rpc timeout", + EnvVars: prefixEnvVars("L2_ENGINE_RPC_TIMEOUT"), + Value: time.Second * 10, + Category: RollupCategory, + } VerifierL1Confs = &cli.Uint64Flag{ Name: "verifier.l1-confs", Usage: "Number of L1 blocks to keep distance from the L1 head before deriving L2 data from. 
Reorgs are supported, but may be slow to perform.", @@ -454,6 +466,7 @@ var optionalFlags = []cli.Flag{ ConductorRpcTimeoutFlag, SafeDBPath, L2EngineKind, + L2EngineRpcTimeout, InteropSupervisor, InteropRPCAddr, InteropRPCPort, diff --git a/op-node/node/client.go b/op-node/node/client.go index 3796bb98d05..9659f7da62b 100644 --- a/op-node/node/client.go +++ b/op-node/node/client.go @@ -47,6 +47,10 @@ type L2EndpointConfig struct { // JWT secrets for L2 Engine API authentication during HTTP or initial Websocket communication. // Any value for an IPC connection. L2EngineJWTSecret [32]byte + + // L2EngineCallTimeout is the default timeout duration for L2 calls. + // Defines the maximum time a call to the L2 engine is allowed to take before timing out. + L2EngineCallTimeout time.Duration } var _ L2EndpointSetup = (*L2EndpointConfig)(nil) @@ -67,6 +71,7 @@ func (cfg *L2EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCf opts := []client.RPCOption{ client.WithGethRPCOptions(auth), client.WithDialAttempts(10), + client.WithCallTimeout(cfg.L2EngineCallTimeout), } l2Node, err := client.NewRPC(ctx, log, cfg.L2EngineAddr, opts...) 
if err != nil { diff --git a/op-node/p2p/cli/load_config.go b/op-node/p2p/cli/load_config.go index 57ae1c221c0..e66234e0684 100644 --- a/op-node/p2p/cli/load_config.go +++ b/op-node/p2p/cli/load_config.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "net" + "net/url" "os" "strings" @@ -177,23 +178,30 @@ func loadDiscoveryOpts(conf *p2p.Config, ctx *cli.Context) error { return fmt.Errorf("failed to open discovery db: %w", err) } - bootnodes := make([]*enode.Node, 0) records := strings.Split(ctx.String(flags.BootnodesName), ",") - for i, recordB64 := range records { - recordB64 = strings.TrimSpace(recordB64) - if recordB64 == "" { // ignore empty records + if len(records) == 0 { + records = p2p.DefaultBootnodes + } + + for i, record := range records { + record = strings.TrimSpace(record) + if record == "" { // ignore empty records continue } - nodeRecord, err := enode.Parse(enode.ValidSchemes, recordB64) + + // Resolve IP addresses of old enode URLs - geth doesn't do it any more. + if strings.HasPrefix(record, "enode://") { + record, err = resolveURLIP(record, net.LookupIP) + if err != nil { + return fmt.Errorf("resolving IP of enode URL %q: %w", record, err) + } + } + + nodeRecord, err := enode.Parse(enode.ValidSchemes, record) if err != nil { - return fmt.Errorf("bootnode record %d (of %d) is invalid: %q err: %w", i, len(records), recordB64, err) + return fmt.Errorf("bootnode record %d (of %d) is invalid: %q err: %w", i, len(records), record, err) } - bootnodes = append(bootnodes, nodeRecord) - } - if len(bootnodes) > 0 { - conf.Bootnodes = bootnodes - } else { - conf.Bootnodes = p2p.DefaultBootnodes + conf.Bootnodes = append(conf.Bootnodes, nodeRecord) } if ctx.IsSet(flags.NetRestrictName) { @@ -207,6 +215,34 @@ func loadDiscoveryOpts(conf *p2p.Config, ctx *cli.Context) error { return nil } +func resolveURLIP(rawurl string, lookupIP func(name string) ([]net.IP, error)) (string, error) { + u, err := url.Parse(rawurl) + if err != nil { + return "", fmt.Errorf("parsing URL 
%q: %w", rawurl, err) + } + ip := net.ParseIP(u.Hostname()) + if ip == nil { + ips, err := lookupIP(u.Hostname()) + if err != nil { + return "", fmt.Errorf("looking up IP for hostname %q: %w", u.Hostname(), err) + } + ip = ips[0] + } + + // Ensure the IP is 4 bytes long for IPv4 addresses. + if ipv4 := ip.To4(); ipv4 != nil { + ip = ipv4 + } + + // reassemble + port := u.Port() + u.Host = ip.String() + if port != "" { + u.Host += ":" + port + } + return u.String(), nil +} + func loadLibp2pOpts(conf *p2p.Config, ctx *cli.Context) error { addrs := strings.Split(ctx.String(flags.StaticPeersName), ",") for i, addr := range addrs { @@ -289,7 +325,7 @@ func loadNetworkPrivKey(ctx *cli.Context) (*crypto.Secp256k1PrivateKey, error) { if keyPath == "" { return nil, errors.New("no p2p private key path specified, cannot auto-generate key without path") } - f, err := os.OpenFile(keyPath, os.O_RDONLY, 0600) + f, err := os.OpenFile(keyPath, os.O_RDONLY, 0o600) if os.IsNotExist(err) { p, _, err := crypto.GenerateSecp256k1Key(rand.Reader) if err != nil { @@ -299,7 +335,7 @@ func loadNetworkPrivKey(ctx *cli.Context) (*crypto.Secp256k1PrivateKey, error) { if err != nil { return nil, fmt.Errorf("failed to encode new p2p priv key: %w", err) } - f, err := os.OpenFile(keyPath, os.O_CREATE|os.O_WRONLY, 0600) + f, err := os.OpenFile(keyPath, os.O_CREATE|os.O_WRONLY, 0o600) if err != nil { return nil, fmt.Errorf("failed to store new p2p priv key: %w", err) } diff --git a/op-node/p2p/cli/load_config_test.go b/op-node/p2p/cli/load_config_test.go new file mode 100644 index 00000000000..d2e1052c64c --- /dev/null +++ b/op-node/p2p/cli/load_config_test.go @@ -0,0 +1,73 @@ +package cli + +import ( + "errors" + "net" + "testing" + + "github.com/ethereum-optimism/optimism/op-node/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/stretchr/testify/require" +) + +func lookupIP(name string) ([]net.IP, error) { + if name == "bootnode.conduit.xyz" { + return []net.IP{{35, 197, 61, 230}}, 
nil + } + return nil, errors.New("no such host") +} + +func TestResolveURLIP(t *testing.T) { + for _, test := range []struct { + url string + expUrl string + expErr string + }{ + { + "enode://d25ce99435982b04d60c4b41ba256b84b888626db7bee45a9419382300fbe907359ae5ef250346785bff8d3b9d07cd3e017a27e2ee3cfda3bcbb0ba762ac9674@bootnode.conduit.xyz:0?discport=30301", + "enode://d25ce99435982b04d60c4b41ba256b84b888626db7bee45a9419382300fbe907359ae5ef250346785bff8d3b9d07cd3e017a27e2ee3cfda3bcbb0ba762ac9674@35.197.61.230:0?discport=30301", + "", + }, + { + "enode://869d07b5932f17e8490990f75a3f94195e9504ddb6b85f7189e5a9c0a8fff8b00aecf6f3ac450ecba6cdabdb5858788a94bde2b613e0f2d82e9b395355f76d1a@34.65.67.101:30305", + "enode://869d07b5932f17e8490990f75a3f94195e9504ddb6b85f7189e5a9c0a8fff8b00aecf6f3ac450ecba6cdabdb5858788a94bde2b613e0f2d82e9b395355f76d1a@34.65.67.101:30305", + "", + }, + { + "enode://2d4e7e9d48f4dd4efe9342706dd1b0024681bd4c3300d021f86fc75eab7865d4e0cbec6fbc883f011cfd6a57423e7e2f6e104baad2b744c3cafaec6bc7dc92c1@34.65.43.171:0?discport=30305", + "enode://2d4e7e9d48f4dd4efe9342706dd1b0024681bd4c3300d021f86fc75eab7865d4e0cbec6fbc883f011cfd6a57423e7e2f6e104baad2b744c3cafaec6bc7dc92c1@34.65.43.171:0?discport=30305", + "", + }, + { + "enode://d25ce99435982b04d60c4b41ba256b84b888626db7bee45a9419382300fbe907359ae5ef250346785bff8d3b9d07cd3e017a27e2ee3cfda3bcbb0ba762ac9674@bootnode.foo.bar:0?discport=30301", + "", + "no such host", + }, + { + "enode://d25ce99435982b04d60c4b41ba256b84b888626db7bee45a9419382300fbe907359ae5ef250346785bff8d3b9d07cd3e017a27e2ee3cfda3bcbb0ba762ac9674@::ffff:35.197.61.230:0?discport=30301", + "enode://d25ce99435982b04d60c4b41ba256b84b888626db7bee45a9419382300fbe907359ae5ef250346785bff8d3b9d07cd3e017a27e2ee3cfda3bcbb0ba762ac9674@35.197.61.230:0?discport=30301", + "", + }, + } { + u, err := resolveURLIP(test.url, lookupIP) + if test.expErr != "" { + require.Contains(t, err.Error(), test.expErr) + } else { + require.NoError(t, err) + require.Equal(t, 
test.expUrl, u) + } + } +} + +// TestDefaultBootnodes checks that the default bootnodes are valid enode specifiers. +// The default boodnodes use to be specified with [enode.MustParse]. But then upstream geth +// stopped resolving DNS host names in old enode specifiers. So this resolution got moved +// into the op-node's initP2P function. Because it is only run at runtime, this test +// ensures that the specifiers are valid (without DNS resolution, which is fine). +func TestDefaultBootnodes(t *testing.T) { + for _, record := range p2p.DefaultBootnodes { + nodeRecord, err := enode.Parse(enode.ValidSchemes, record) + require.NoError(t, err) + require.NotNil(t, nodeRecord) + } +} diff --git a/op-node/p2p/config.go b/op-node/p2p/config.go index 10a75881b87..da74e742c91 100644 --- a/op-node/p2p/config.go +++ b/op-node/p2p/config.go @@ -25,20 +25,20 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" ) -var DefaultBootnodes = []*enode.Node{ +var DefaultBootnodes = []string{ // OP Labs - enode.MustParse("enode://869d07b5932f17e8490990f75a3f94195e9504ddb6b85f7189e5a9c0a8fff8b00aecf6f3ac450ecba6cdabdb5858788a94bde2b613e0f2d82e9b395355f76d1a@34.65.67.101:30305"), - enode.MustParse("enode://2d4e7e9d48f4dd4efe9342706dd1b0024681bd4c3300d021f86fc75eab7865d4e0cbec6fbc883f011cfd6a57423e7e2f6e104baad2b744c3cafaec6bc7dc92c1@34.65.43.171:30305"), + "enode://869d07b5932f17e8490990f75a3f94195e9504ddb6b85f7189e5a9c0a8fff8b00aecf6f3ac450ecba6cdabdb5858788a94bde2b613e0f2d82e9b395355f76d1a@34.65.67.101:30305", + "enode://2d4e7e9d48f4dd4efe9342706dd1b0024681bd4c3300d021f86fc75eab7865d4e0cbec6fbc883f011cfd6a57423e7e2f6e104baad2b744c3cafaec6bc7dc92c1@34.65.43.171:30305", // Base - enode.MustParse("enr:-J24QNz9lbrKbN4iSmmjtnr7SjUMk4zB7f1krHZcTZx-JRKZd0kA2gjufUROD6T3sOWDVDnFJRvqBBo62zuF-hYCohOGAYiOoEyEgmlkgnY0gmlwhAPniryHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQKNVFlCxh_B-716tTs-h1vMzZkSs1FTu_OYTNjgufplG4N0Y3CCJAaDdWRwgiQG"), - 
enode.MustParse("enr:-J24QH-f1wt99sfpHy4c0QJM-NfmsIfmlLAMMcgZCUEgKG_BBYFc6FwYgaMJMQN5dsRBJApIok0jFn-9CS842lGpLmqGAYiOoDRAgmlkgnY0gmlwhLhIgb2Hb3BzdGFja4OFQgCJc2VjcDI1NmsxoQJ9FTIv8B9myn1MWaC_2lJ-sMoeCDkusCsk4BYHjjCq04N0Y3CCJAaDdWRwgiQG"), - enode.MustParse("enr:-J24QDXyyxvQYsd0yfsN0cRr1lZ1N11zGTplMNlW4xNEc7LkPXh0NAJ9iSOVdRO95GPYAIc6xmyoCCG6_0JxdL3a0zaGAYiOoAjFgmlkgnY0gmlwhAPckbGHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQJwoS7tzwxqXSyFL7g0JM-KWVbgvjfB8JA__T7yY_cYboN0Y3CCJAaDdWRwgiQG"), - enode.MustParse("enr:-J24QHmGyBwUZXIcsGYMaUqGGSl4CFdx9Tozu-vQCn5bHIQbR7On7dZbU61vYvfrJr30t0iahSqhc64J46MnUO2JvQaGAYiOoCKKgmlkgnY0gmlwhAPnCzSHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQINc4fSijfbNIiGhcgvwjsjxVFJHUstK9L1T8OTKUjgloN0Y3CCJAaDdWRwgiQG"), - enode.MustParse("enr:-J24QG3ypT4xSu0gjb5PABCmVxZqBjVw9ca7pvsI8jl4KATYAnxBmfkaIuEqy9sKvDHKuNCsy57WwK9wTt2aQgcaDDyGAYiOoGAXgmlkgnY0gmlwhDbGmZaHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQIeAK_--tcLEiu7HvoUlbV52MspE0uCocsx1f_rYvRenIN0Y3CCJAaDdWRwgiQG"), + "enr:-J24QNz9lbrKbN4iSmmjtnr7SjUMk4zB7f1krHZcTZx-JRKZd0kA2gjufUROD6T3sOWDVDnFJRvqBBo62zuF-hYCohOGAYiOoEyEgmlkgnY0gmlwhAPniryHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQKNVFlCxh_B-716tTs-h1vMzZkSs1FTu_OYTNjgufplG4N0Y3CCJAaDdWRwgiQG", + "enr:-J24QH-f1wt99sfpHy4c0QJM-NfmsIfmlLAMMcgZCUEgKG_BBYFc6FwYgaMJMQN5dsRBJApIok0jFn-9CS842lGpLmqGAYiOoDRAgmlkgnY0gmlwhLhIgb2Hb3BzdGFja4OFQgCJc2VjcDI1NmsxoQJ9FTIv8B9myn1MWaC_2lJ-sMoeCDkusCsk4BYHjjCq04N0Y3CCJAaDdWRwgiQG", + "enr:-J24QDXyyxvQYsd0yfsN0cRr1lZ1N11zGTplMNlW4xNEc7LkPXh0NAJ9iSOVdRO95GPYAIc6xmyoCCG6_0JxdL3a0zaGAYiOoAjFgmlkgnY0gmlwhAPckbGHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQJwoS7tzwxqXSyFL7g0JM-KWVbgvjfB8JA__T7yY_cYboN0Y3CCJAaDdWRwgiQG", + "enr:-J24QHmGyBwUZXIcsGYMaUqGGSl4CFdx9Tozu-vQCn5bHIQbR7On7dZbU61vYvfrJr30t0iahSqhc64J46MnUO2JvQaGAYiOoCKKgmlkgnY0gmlwhAPnCzSHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQINc4fSijfbNIiGhcgvwjsjxVFJHUstK9L1T8OTKUjgloN0Y3CCJAaDdWRwgiQG", + 
"enr:-J24QG3ypT4xSu0gjb5PABCmVxZqBjVw9ca7pvsI8jl4KATYAnxBmfkaIuEqy9sKvDHKuNCsy57WwK9wTt2aQgcaDDyGAYiOoGAXgmlkgnY0gmlwhDbGmZaHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQIeAK_--tcLEiu7HvoUlbV52MspE0uCocsx1f_rYvRenIN0Y3CCJAaDdWRwgiQG", // Conduit - enode.MustParse("enode://d25ce99435982b04d60c4b41ba256b84b888626db7bee45a9419382300fbe907359ae5ef250346785bff8d3b9d07cd3e017a27e2ee3cfda3bcbb0ba762ac9674@bootnode.conduit.xyz:0?discport=30301"), - enode.MustParse("enode://2d4e7e9d48f4dd4efe9342706dd1b0024681bd4c3300d021f86fc75eab7865d4e0cbec6fbc883f011cfd6a57423e7e2f6e104baad2b744c3cafaec6bc7dc92c1@34.65.43.171:0?discport=30305"), - enode.MustParse("enode://9d7a3efefe442351217e73b3a593bcb8efffb55b4807699972145324eab5e6b382152f8d24f6301baebbfb5ecd4127bd3faab2842c04cd432bdf50ba092f6645@34.65.109.126:0?discport=30305"), + "enode://d25ce99435982b04d60c4b41ba256b84b888626db7bee45a9419382300fbe907359ae5ef250346785bff8d3b9d07cd3e017a27e2ee3cfda3bcbb0ba762ac9674@bootnode.conduit.xyz:0?discport=30301", + "enode://2d4e7e9d48f4dd4efe9342706dd1b0024681bd4c3300d021f86fc75eab7865d4e0cbec6fbc883f011cfd6a57423e7e2f6e104baad2b744c3cafaec6bc7dc92c1@34.65.43.171:0?discport=30305", + "enode://9d7a3efefe442351217e73b3a593bcb8efffb55b4807699972145324eab5e6b382152f8d24f6301baebbfb5ecd4127bd3faab2842c04cd432bdf50ba092f6645@34.65.109.126:0?discport=30305", } type HostMetrics interface { diff --git a/op-node/rollup/derive/blob_data_source_test.go b/op-node/rollup/derive/blob_data_source_test.go index e5e31dc957b..a20205544c4 100644 --- a/op-node/rollup/derive/blob_data_source_test.go +++ b/op-node/rollup/derive/blob_data_source_test.go @@ -28,7 +28,7 @@ func TestDataAndHashesFromTxs(t *testing.T) { logger := testlog.Logger(t, log.LvlInfo) chainId := new(big.Int).SetUint64(rng.Uint64()) - signer := types.NewCancunSigner(chainId) + signer := types.NewPragueSigner(chainId) config := DataSourceConfig{ l1Signer: signer, batchInboxAddress: batchInboxAddr, @@ -87,6 +87,20 @@ func TestDataAndHashesFromTxs(t *testing.T) 
{ data, blobHashes = dataAndHashesFromTxs(txs, &config, batcherAddr, logger) require.Equal(t, 0, len(data)) require.Equal(t, 0, len(blobHashes)) + + // make sure SetCode transactions are ignored. + setCodeTxData := &types.SetCodeTx{ + Nonce: rng.Uint64(), + Gas: 2_000_000, + To: batchInboxAddr, + Data: testutils.RandomData(rng, rng.Intn(1000)), + } + setCodeTx, err := types.SignNewTx(privateKey, signer, setCodeTxData) + require.NoError(t, err) + txs = types.Transactions{setCodeTx} + data, blobHashes = dataAndHashesFromTxs(txs, &config, batcherAddr, logger) + require.Equal(t, 0, len(data)) + require.Equal(t, 0, len(blobHashes)) } func TestFillBlobPointers(t *testing.T) { diff --git a/op-node/rollup/derive/data_source.go b/op-node/rollup/derive/data_source.go index 8d064a7cdb8..dfeda599501 100644 --- a/op-node/rollup/derive/data_source.go +++ b/op-node/rollup/derive/data_source.go @@ -91,9 +91,15 @@ type DataSourceConfig struct { } // isValidBatchTx returns true if: -// 1. the transaction has a To() address that matches the batch inbox address, and -// 2. the transaction has a valid signature from the batcher address +// 1. the transaction type is any of Legacy, ACL, DynamicFee, Blob, or Deposit (for L3s). +// 2. the transaction has a To() address that matches the batch inbox address, and +// 3. the transaction has a valid signature from the batcher address func isValidBatchTx(tx *types.Transaction, l1Signer types.Signer, batchInboxAddr, batcherAddr common.Address, logger log.Logger) bool { + // For now, we want to disallow the SetCodeTx type or any future types. 
+ if tx.Type() > types.BlobTxType && tx.Type() != types.DepositTxType { + return false + } + to := tx.To() if to == nil || *to != batchInboxAddr { return false diff --git a/op-node/rollup/derive/fuzz_parsers_test.go b/op-node/rollup/derive/fuzz_parsers_test.go index afb7e0850e2..f8f2374e6b3 100644 --- a/op-node/rollup/derive/fuzz_parsers_test.go +++ b/op-node/rollup/derive/fuzz_parsers_test.go @@ -65,7 +65,6 @@ func FuzzL1InfoBedrockRoundTrip(f *testing.F) { if !cmp.Equal(in, out, cmp.Comparer(testutils.BigEqual)) { t.Fatalf("The data did not round trip correctly. in: %v. out: %v", in, out) } - }) } @@ -105,7 +104,6 @@ func FuzzL1InfoEcotoneRoundTrip(f *testing.F) { if !cmp.Equal(in, out, cmp.Comparer(testutils.BigEqual)) { t.Fatalf("The Interop data did not round trip correctly. in: %v. out: %v", in, out) } - }) } @@ -171,7 +169,6 @@ func FuzzL1InfoBedrockAgainstContract(f *testing.F) { if !cmp.Equal(expected, actual, cmp.Comparer(testutils.BigEqual)) { t.Fatalf("The data did not round trip correctly. expected: %v. 
actual: %v", expected, actual) } - }) } @@ -255,7 +252,7 @@ func FuzzUnmarshallLogEvent(f *testing.F) { }) require.NoError(f, err) - _, err = state.Commit(0, false) + _, err = state.Commit(0, false, false) require.NoError(f, err) portalContract, err := bindings.NewOptimismPortal(addr, nil) diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go index 79a22812dec..276746719df 100644 --- a/op-node/rollup/driver/state.go +++ b/op-node/rollup/driver/state.go @@ -343,6 +343,8 @@ func (s *SyncDeriver) OnEvent(ev event.Event) bool { s.Emitter.Emit(StepReqEvent{ResetBackoff: true}) case engine.SafeDerivedEvent: s.onSafeDerivedBlock(x) + case derive.ProvideL1Traversal: + s.Emitter.Emit(StepReqEvent{}) default: return false } diff --git a/op-node/rollup/interop/managed/api.go b/op-node/rollup/interop/managed/api.go index df1429ae531..03de8aa2a88 100644 --- a/op-node/rollup/interop/managed/api.go +++ b/op-node/rollup/interop/managed/api.go @@ -67,6 +67,10 @@ func (ib *InteropAPI) PendingOutputV0AtTimestamp(ctx context.Context, timestamp return ib.backend.PendingOutputV0AtTimestamp(ctx, timestamp) } +func (ib *InteropAPI) L2BlockRefByTimestamp(ctx context.Context, timestamp uint64) (eth.L2BlockRef, error) { + return ib.backend.L2BlockRefByTimestamp(ctx, timestamp) +} + func (ib *InteropAPI) ProvideL1(ctx context.Context, nextL1 eth.BlockRef) error { return ib.backend.ProvideL1(ctx, nextL1) } diff --git a/op-node/rollup/interop/managed/attributes.go b/op-node/rollup/interop/managed/attributes.go index 54483ea68ad..db2950ab51a 100644 --- a/op-node/rollup/interop/managed/attributes.go +++ b/op-node/rollup/interop/managed/attributes.go @@ -14,6 +14,8 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" ) +var OptimisticBlockDepositSenderAddress = common.HexToAddress("0xdeaddeaddeaddeaddeaddeaddeaddeaddead0002") + // AttributesToReplaceInvalidBlock builds the payload-attributes to replace an invalidated block. 
// See https://github.com/ethereum-optimism/specs/blob/main/specs/interop/derivation.md#replacing-invalid-blocks func AttributesToReplaceInvalidBlock(invalidatedBlock *eth.ExecutionPayloadEnvelope) *eth.PayloadAttributes { @@ -26,9 +28,12 @@ func AttributesToReplaceInvalidBlock(invalidatedBlock *eth.ExecutionPayloadEnvel } } // Add the system-tx that declares the replacement. + if invalidatedBlock.ExecutionPayload.WithdrawalsRoot == nil { + panic("withdrawals-root is nil") + } l2Output := eth.OutputV0{ StateRoot: invalidatedBlock.ExecutionPayload.StateRoot, - MessagePasserStorageRoot: eth.Bytes32{}, // TODO: the withdrawals-root in header is needed here. + MessagePasserStorageRoot: eth.Bytes32(*invalidatedBlock.ExecutionPayload.WithdrawalsRoot), BlockHash: invalidatedBlock.ExecutionPayload.BlockHash, } outputRootPreimage := l2Output.Marshal() @@ -67,7 +72,7 @@ func InvalidatedBlockSourceDepositTx(outputRootPreimage []byte) *types.Transacti src := derive.InvalidatedBlockSource{OutputRoot: outputRoot} return types.NewTx(&types.DepositTx{ SourceHash: src.SourceHash(), - From: derive.L1InfoDepositerAddress, + From: OptimisticBlockDepositSenderAddress, To: &common.Address{}, // to the zero address, no EVM execution. 
Mint: big.NewInt(0), Value: big.NewInt(0), @@ -97,7 +102,7 @@ func DecodeInvalidatedBlockTx(tx *types.Transaction) (*eth.OutputV0, error) { if err != nil { return nil, fmt.Errorf("failed to get invalidated-block deposit-tx sender addr: %w", err) } - if from != derive.L1InfoDepositerAddress { + if from != OptimisticBlockDepositSenderAddress { return nil, fmt.Errorf("expected system tx sender, but got %s", from) } out, err := eth.UnmarshalOutput(tx.Data()) diff --git a/op-node/rollup/interop/managed/attributes_test.go b/op-node/rollup/interop/managed/attributes_test.go index 96950fcfc5c..7966c173178 100644 --- a/op-node/rollup/interop/managed/attributes_test.go +++ b/op-node/rollup/interop/managed/attributes_test.go @@ -14,7 +14,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testutils" ) @@ -46,6 +45,7 @@ func TestAttributesToReplaceInvalidBlock(t *testing.T) { denominator := uint64(100) elasticity := uint64(42) extraData := eip1559.EncodeHoloceneExtraData(denominator, elasticity) + withdrawalsRoot := testutils.RandomHash(rng) beaconRoot := testutils.RandomHash(rng) invalidatedBlock := ð.ExecutionPayloadEnvelope{ @@ -68,9 +68,10 @@ func TestAttributesToReplaceInvalidBlock(t *testing.T) { opaqueDepositTx, opaqueUserTx, }, - Withdrawals: &types.Withdrawals{}, - BlobGasUsed: new(eth.Uint64Quantity), - ExcessBlobGas: new(eth.Uint64Quantity), + Withdrawals: &types.Withdrawals{}, + BlobGasUsed: new(eth.Uint64Quantity), + ExcessBlobGas: new(eth.Uint64Quantity), + WithdrawalsRoot: &withdrawalsRoot, }, } attrs := AttributesToReplaceInvalidBlock(invalidatedBlock) @@ -91,8 +92,7 @@ func TestAttributesToReplaceInvalidBlock(t *testing.T) { require.NoError(t, err) require.Equal(t, invalidatedBlock.ExecutionPayload.BlockHash, result.BlockHash) require.Equal(t, 
invalidatedBlock.ExecutionPayload.StateRoot, result.StateRoot) - // Once withdrawals-root feature lands and it is part of the execution-payload type, assert here - //require.Equal(t, nil, result.MessagePasserStorageRoot) + require.Equal(t, withdrawalsRoot[:], result.MessagePasserStorageRoot[:]) } // TestInvalidatedBlockTx tests we can encode/decode the system tx that represents the invalidated block @@ -118,7 +118,7 @@ func TestInvalidatedBlockTx(t *testing.T) { signer := types.LatestSignerForChainID(big.NewInt(0)) sender, err := signer.Sender(tx) require.NoError(t, err) - require.Equal(t, derive.L1InfoDepositerAddress, sender, "from") + require.Equal(t, OptimisticBlockDepositSenderAddress, sender, "from") require.Equal(t, common.Address{}, *tx.To(), "to") require.Equal(t, "0", tx.Mint().String(), "mint") require.Equal(t, "0", tx.Value().String(), "value") diff --git a/op-node/rollup/interop/managed/system.go b/op-node/rollup/interop/managed/system.go index e39f71b3989..5b2540b0492 100644 --- a/op-node/rollup/interop/managed/system.go +++ b/op-node/rollup/interop/managed/system.go @@ -327,11 +327,7 @@ func (m *ManagedMode) ChainID(ctx context.Context) (eth.ChainID, error) { } func (m *ManagedMode) OutputV0AtTimestamp(ctx context.Context, timestamp uint64) (*eth.OutputV0, error) { - num, err := m.cfg.TargetBlockNumber(timestamp) - if err != nil { - return nil, err - } - ref, err := m.l2.L2BlockRefByNumber(ctx, num) + ref, err := m.L2BlockRefByTimestamp(ctx, timestamp) if err != nil { return nil, err } @@ -339,11 +335,7 @@ func (m *ManagedMode) OutputV0AtTimestamp(ctx context.Context, timestamp uint64) } func (m *ManagedMode) PendingOutputV0AtTimestamp(ctx context.Context, timestamp uint64) (*eth.OutputV0, error) { - num, err := m.cfg.TargetBlockNumber(timestamp) - if err != nil { - return nil, err - } - ref, err := m.l2.L2BlockRefByNumber(ctx, num) + ref, err := m.L2BlockRefByTimestamp(ctx, timestamp) if err != nil { return nil, err } @@ -352,3 +344,11 @@ func (m 
*ManagedMode) PendingOutputV0AtTimestamp(ctx context.Context, timestamp // For now, we use the output at timestamp as-if it didn't contain invalid messages for happy path testing. return m.l2.OutputV0AtBlock(ctx, ref.Hash) } + +func (m *ManagedMode) L2BlockRefByTimestamp(ctx context.Context, timestamp uint64) (eth.L2BlockRef, error) { + num, err := m.cfg.TargetBlockNumber(timestamp) + if err != nil { + return eth.L2BlockRef{}, err + } + return m.l2.L2BlockRefByNumber(ctx, num) +} diff --git a/op-node/service.go b/op-node/service.go index 7df2ab9a121..9ed2e839273 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -171,9 +171,11 @@ func NewL2EndpointConfig(ctx *cli.Context, logger log.Logger) (*node.L2EndpointC if err != nil { return nil, err } + l2RpcTimeout := ctx.Duration(flags.L2EngineRpcTimeout.Name) return &node.L2EndpointConfig{ - L2EngineAddr: l2Addr, - L2EngineJWTSecret: secret, + L2EngineAddr: l2Addr, + L2EngineJWTSecret: secret, + L2EngineCallTimeout: l2RpcTimeout, }, nil } diff --git a/op-program/Dockerfile.repro b/op-program/Dockerfile.repro index 346dc416e81..0bb95ed1202 100644 --- a/op-program/Dockerfile.repro +++ b/op-program/Dockerfile.repro @@ -34,7 +34,6 @@ RUN --mount=type=cache,target=/root/.cache/go-build cd op-program && make op-pro # Run the op-program-client.elf binary directly through cannon's load-elf subcommand. 
RUN /app/cannon/bin/cannon load-elf --type singlethreaded-2 --path /app/op-program/bin/op-program-client.elf --out /app/op-program/bin/prestate.bin.gz --meta "/app/op-program/bin/meta.json" -RUN /app/cannon/bin/cannon load-elf --type multithreaded-2 --path /app/op-program/bin/op-program-client.elf --out /app/op-program/bin/prestate-mt.bin.gz --meta "/app/op-program/bin/meta-mt.json" RUN /app/cannon/bin/cannon load-elf --type multithreaded64-3 --path /app/op-program/bin/op-program-client64.elf --out /app/op-program/bin/prestate-mt64.bin.gz --meta "/app/op-program/bin/meta-mt64.json" RUN /app/cannon/bin/cannon load-elf --type multithreaded64-3 --path /app/op-program/bin/op-program-client-interop.elf --out /app/op-program/bin/prestate-interop.bin.gz --meta "/app/op-program/bin/meta-interop.json" @@ -42,9 +41,6 @@ RUN /app/cannon/bin/cannon load-elf --type multithreaded64-3 --path /app/op-prog RUN /app/cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input /app/op-program/bin/prestate.bin.gz --meta "" --proof-fmt '/app/op-program/bin/%d.json' --output "" RUN mv /app/op-program/bin/0.json /app/op-program/bin/prestate-proof.json -RUN /app/cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input /app/op-program/bin/prestate-mt.bin.gz --meta "" --proof-fmt '/app/op-program/bin/%d-mt.json' --output "" -RUN mv /app/op-program/bin/0-mt.json /app/op-program/bin/prestate-proof-mt.json - RUN /app/cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input /app/op-program/bin/prestate-mt64.bin.gz --meta "" --proof-fmt '/app/op-program/bin/%d-mt64.json' --output "" RUN mv /app/op-program/bin/0-mt64.json /app/op-program/bin/prestate-proof-mt64.json @@ -56,7 +52,6 @@ RUN mv /app/op-program/bin/0-interop.json /app/op-program/bin/prestate-proof-int # e.g. `BUILDKIT=1 docker build ...` FROM scratch AS export-stage-proofs COPY --from=builder /app/op-program/bin/prestate-proof.json . -COPY --from=builder /app/op-program/bin/prestate-proof-mt.json . 
COPY --from=builder /app/op-program/bin/prestate-proof-mt64.json . COPY --from=builder /app/op-program/bin/prestate-proof-interop.json . @@ -66,9 +61,6 @@ COPY --from=builder /app/op-program/bin/op-program-client64.elf . COPY --from=builder /app/op-program/bin/meta.json . COPY --from=builder /app/op-program/bin/prestate.bin.gz . COPY --from=builder /app/op-program/bin/prestate-proof.json . -COPY --from=builder /app/op-program/bin/meta-mt.json . -COPY --from=builder /app/op-program/bin/prestate-mt.bin.gz . -COPY --from=builder /app/op-program/bin/prestate-proof-mt.json . COPY --from=builder /app/op-program/bin/meta-mt64.json . COPY --from=builder /app/op-program/bin/prestate-mt64.bin.gz . COPY --from=builder /app/op-program/bin/prestate-proof-mt64.json . diff --git a/op-program/Makefile b/op-program/Makefile index 037fcce3b9c..c2462ea33bf 100644 --- a/op-program/Makefile +++ b/op-program/Makefile @@ -51,8 +51,6 @@ reproducible-prestate: @docker build --output ./bin/ --progress plain -f Dockerfile.repro ../ @echo "Cannon Absolute prestate hash: " @cat ./bin/prestate-proof.json | jq -r .pre - @echo "MT-Cannon Absolute prestate hash: " - @cat ./bin/prestate-proof-mt.json | jq -r .pre @echo "Cannon64 Absolute prestate hash: " @cat ./bin/prestate-proof-mt64.json | jq -r .pre @echo "CannonInterop Absolute prestate hash: " diff --git a/op-program/client/interop/interop.go b/op-program/client/interop/interop.go index 20001db9287..311a284de64 100644 --- a/op-program/client/interop/interop.go +++ b/op-program/client/interop/interop.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) @@ -199,6 +200,8 @@ func (t *interopTaskExecutor) RunDerivation( claimedBlockNumber, l1Oracle, l2Oracle, + memorydb.New(), + 
tasks.DerivationOptions{StoreBlockData: true}, ) } diff --git a/op-program/client/l2/db.go b/op-program/client/l2/db.go index 1bcf2a6b74e..0c1f2c3714c 100644 --- a/op-program/client/l2/db.go +++ b/op-program/client/l2/db.go @@ -9,22 +9,29 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/memorydb" ) var codePrefixedKeyLength = common.HashLength + len(rawdb.CodePrefix) var ErrInvalidKeyLength = errors.New("pre-images must be identified by 32-byte hash keys") +// KeyValueStore is a subset of the ethdb.KeyValueStore interface that's required for block processing. +type KeyValueStore interface { + ethdb.KeyValueReader + ethdb.Batcher + // Put inserts the given value into the key-value data store. + Put(key []byte, value []byte) error +} + type OracleKeyValueStore struct { - db ethdb.KeyValueStore + db KeyValueStore oracle StateOracle chainID eth.ChainID } -func NewOracleBackedDB(oracle StateOracle, chainID eth.ChainID) *OracleKeyValueStore { +func NewOracleBackedDB(kv KeyValueStore, oracle StateOracle, chainID eth.ChainID) *OracleKeyValueStore { return &OracleKeyValueStore{ - db: memorydb.New(), + db: kv, oracle: oracle, chainID: chainID, } diff --git a/op-program/client/l2/db_test.go b/op-program/client/l2/db_test.go index 96f2a863f99..5b92a516504 100644 --- a/op-program/client/l2/db_test.go +++ b/op-program/client/l2/db_test.go @@ -35,7 +35,7 @@ var _ ethdb.KeyValueStore = (*OracleKeyValueStore)(nil) func TestGet(t *testing.T) { t.Run("IncorrectLengthKey", func(t *testing.T) { oracle := test.NewStubStateOracle(t) - db := NewOracleBackedDB(oracle, eth.ChainIDFromUInt64(1234)) + db := NewOracleBackedDB(memorydb.New(), oracle, eth.ChainIDFromUInt64(1234)) val, err := db.Get([]byte{1, 2, 3}) require.ErrorIs(t, err, ErrInvalidKeyLength) require.Nil(t, val) @@ -43,7 +43,7 @@ func TestGet(t *testing.T) { t.Run("KeyWithCodePrefix", func(t 
*testing.T) { oracle := test.NewStubStateOracle(t) - db := NewOracleBackedDB(oracle, eth.ChainIDFromUInt64(1234)) + db := NewOracleBackedDB(memorydb.New(), oracle, eth.ChainIDFromUInt64(1234)) key := common.HexToHash("0x12345678") prefixedKey := append(rawdb.CodePrefix, key.Bytes()...) @@ -57,7 +57,7 @@ func TestGet(t *testing.T) { t.Run("NormalKeyThatHappensToStartWithCodePrefix", func(t *testing.T) { oracle := test.NewStubStateOracle(t) - db := NewOracleBackedDB(oracle, eth.ChainIDFromUInt64(1234)) + db := NewOracleBackedDB(memorydb.New(), oracle, eth.ChainIDFromUInt64(1234)) key := make([]byte, common.HashLength) copy(rawdb.CodePrefix, key) fmt.Println(key[0]) @@ -74,7 +74,7 @@ func TestGet(t *testing.T) { expected := []byte{2, 6, 3, 8} oracle := test.NewStubStateOracle(t) oracle.Data[key] = expected - db := NewOracleBackedDB(oracle, eth.ChainIDFromUInt64(1234)) + db := NewOracleBackedDB(memorydb.New(), oracle, eth.ChainIDFromUInt64(1234)) val, err := db.Get(key.Bytes()) require.NoError(t, err) require.Equal(t, expected, val) @@ -84,7 +84,7 @@ func TestGet(t *testing.T) { func TestPut(t *testing.T) { t.Run("NewKey", func(t *testing.T) { oracle := test.NewStubStateOracle(t) - db := NewOracleBackedDB(oracle, eth.ChainIDFromUInt64(1234)) + db := NewOracleBackedDB(memorydb.New(), oracle, eth.ChainIDFromUInt64(1234)) key := common.HexToHash("0xAA4488") value := []byte{2, 6, 3, 8} err := db.Put(key.Bytes(), value) @@ -96,7 +96,7 @@ func TestPut(t *testing.T) { }) t.Run("ReplaceKey", func(t *testing.T) { oracle := test.NewStubStateOracle(t) - db := NewOracleBackedDB(oracle, eth.ChainIDFromUInt64(1234)) + db := NewOracleBackedDB(memorydb.New(), oracle, eth.ChainIDFromUInt64(1234)) key := common.HexToHash("0xAA4488") value1 := []byte{2, 6, 3, 8} value2 := []byte{1, 2, 3} @@ -118,13 +118,13 @@ func TestSupportsStateDBOperations(t *testing.T) { genesisBlock := l2Genesis.MustCommit(realDb, trieDB) loader := test.NewKvStateOracle(t, realDb) - assertStateDataAvailable(t, 
NewOracleBackedDB(loader, eth.ChainIDFromUInt64(1234)), l2Genesis, genesisBlock) + assertStateDataAvailable(t, NewOracleBackedDB(memorydb.New(), loader, eth.ChainIDFromUInt64(1234)), l2Genesis, genesisBlock) } func TestUpdateState(t *testing.T) { l2Genesis := createGenesis() oracle := test.NewStubStateOracle(t) - db := rawdb.NewDatabase(NewOracleBackedDB(oracle, eth.ChainIDFromUInt64(1234))) + db := rawdb.NewDatabase(NewOracleBackedDB(memorydb.New(), oracle, eth.ChainIDFromUInt64(1234))) trieDB := triedb.NewDatabase(db, &triedb.Config{HashDB: hashdb.Defaults}) genesisBlock := l2Genesis.MustCommit(db, trieDB) @@ -144,7 +144,8 @@ func TestUpdateState(t *testing.T) { require.Equal(t, []byte{1}, statedb.GetCode(codeAccount)) // Changes should be available under the new state root after committing - newRoot, err := statedb.Commit(genesisBlock.NumberU64()+1, false) + isCancun := l2Genesis.Config.IsCancun(genesisBlock.Number(), genesisBlock.Time()) + newRoot, err := statedb.Commit(genesisBlock.NumberU64()+1, false, isCancun) require.NoError(t, err) err = statedb.Database().TrieDB().Commit(newRoot, true) require.NoError(t, err) diff --git a/op-program/client/l2/engine_backend.go b/op-program/client/l2/engine_backend.go index 6bc35302ad6..0ba24d47a47 100644 --- a/op-program/client/l2/engine_backend.go +++ b/op-program/client/l2/engine_backend.go @@ -34,14 +34,23 @@ type OracleBackedL2Chain struct { // Inserted blocks blocks map[common.Hash]*types.Block - db ethdb.KeyValueStore + // Receipts of inserted blocks + receiptsByBlockHash map[common.Hash]types.Receipts + db ethdb.KeyValueStore } // Must implement CachingEngineBackend, not just EngineBackend to ensure that blocks are stored when they are created // and don't need to be re-executed when sent back via execution_newPayload. 
var _ engineapi.CachingEngineBackend = (*OracleBackedL2Chain)(nil) -func NewOracleBackedL2Chain(logger log.Logger, oracle Oracle, precompileOracle engineapi.PrecompileOracle, chainCfg *params.ChainConfig, l2OutputRoot common.Hash) (*OracleBackedL2Chain, error) { +func NewOracleBackedL2Chain( + logger log.Logger, + oracle Oracle, + precompileOracle engineapi.PrecompileOracle, + chainCfg *params.ChainConfig, + l2OutputRoot common.Hash, + db KeyValueStore, +) (*OracleBackedL2Chain, error) { chainID := eth.ChainIDFromBig(chainCfg.ChainID) output := oracle.OutputByRoot(l2OutputRoot, chainID) outputV0, ok := output.(*eth.OutputV0) @@ -50,10 +59,17 @@ func NewOracleBackedL2Chain(logger log.Logger, oracle Oracle, precompileOracle e } head := oracle.BlockByHash(outputV0.BlockHash, chainID) logger.Info("Loaded L2 head", "hash", head.Hash(), "number", head.Number()) - return NewOracleBackedL2ChainFromHead(logger, oracle, precompileOracle, chainCfg, head), nil + return NewOracleBackedL2ChainFromHead(logger, oracle, precompileOracle, chainCfg, head, db), nil } -func NewOracleBackedL2ChainFromHead(logger log.Logger, oracle Oracle, precompileOracle engineapi.PrecompileOracle, chainCfg *params.ChainConfig, head *types.Block) *OracleBackedL2Chain { +func NewOracleBackedL2ChainFromHead( + logger log.Logger, + oracle Oracle, + precompileOracle engineapi.PrecompileOracle, + chainCfg *params.ChainConfig, + head *types.Block, + db KeyValueStore, +) *OracleBackedL2Chain { chainID := eth.ChainIDFromBig(chainCfg.ChainID) chain := &OracleBackedL2Chain{ log: logger, @@ -62,11 +78,12 @@ func NewOracleBackedL2ChainFromHead(logger log.Logger, oracle Oracle, precompile engine: beacon.New(nil), // Treat the agreed starting head as finalized - nothing before it can be disputed - safe: head.Header(), - finalized: head.Header(), - oracleHead: head.Header(), - blocks: make(map[common.Hash]*types.Block), - db: NewOracleBackedDB(oracle, chainID), + safe: head.Header(), + finalized: head.Header(), + 
oracleHead: head.Header(), + blocks: make(map[common.Hash]*types.Block), + receiptsByBlockHash: make(map[common.Hash]types.Receipts), + db: NewOracleBackedDB(db, oracle, chainID), vmCfg: vm.Config{ PrecompileOverrides: engineapi.CreatePrecompileOverrides(precompileOracle), }, @@ -150,6 +167,15 @@ func (o *OracleBackedL2Chain) GetCanonicalHash(n uint64) common.Hash { return header.Hash() } +func (o *OracleBackedL2Chain) GetReceiptsByBlockHash(hash common.Hash) types.Receipts { + receipts, ok := o.receiptsByBlockHash[hash] + if ok { + return receipts + } + _, receipts = o.oracle.ReceiptsByBlockHash(hash, eth.ChainIDFromBig(o.chainCfg.ChainID)) + return receipts +} + func (o *OracleBackedL2Chain) GetVMConfig() *vm.Config { return &o.vmCfg } @@ -193,7 +219,7 @@ func (o *OracleBackedL2Chain) InsertBlockWithoutSetHead(block *types.Block, make } func (o *OracleBackedL2Chain) AssembleAndInsertBlockWithoutSetHead(processor *engineapi.BlockProcessor) (*types.Block, error) { - block, err := processor.Assemble() + block, receipts, err := processor.Assemble() if err != nil { return nil, fmt.Errorf("invalid block: %w", err) } @@ -202,6 +228,7 @@ func (o *OracleBackedL2Chain) AssembleAndInsertBlockWithoutSetHead(processor *en return nil, fmt.Errorf("commit block: %w", err) } o.blocks[block.Hash()] = block + o.receiptsByBlockHash[block.Hash()] = receipts return block, nil } diff --git a/op-program/client/l2/engine_backend_test.go b/op-program/client/l2/engine_backend_test.go index c506c12e707..b204b3d12df 100644 --- a/op-program/client/l2/engine_backend_test.go +++ b/op-program/client/l2/engine_backend_test.go @@ -18,8 +18,10 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb" 
"github.com/ethereum/go-ethereum/triedb/hashdb" "github.com/stretchr/testify/require" @@ -117,6 +119,26 @@ func TestSetSafe(t *testing.T) { require.Equal(t, blocks[2].Header(), chain.CurrentSafeBlock()) } +func TestGetReceiptsByBlockHash(t *testing.T) { + blocks, chain := setupOracleBackedChain(t, 1) + + _, err := chain.InsertBlockWithoutSetHead(blocks[1], false) + require.NoError(t, err) + require.Equal(t, + blocks[1].ReceiptHash(), + types.DeriveSha(chain.GetReceiptsByBlockHash(blocks[1].Hash()), trie.NewStackTrie(nil)), + "Lookup block1 receipt", + ) + + // create block with txs/receipts + newBlock := createBlock(t, chain) + _, err = chain.InsertBlockWithoutSetHead(newBlock, false) + require.NoError(t, err) + receipts := chain.GetReceiptsByBlockHash(newBlock.Hash()) + require.NotNil(t, receipts) + require.Equal(t, newBlock.ReceiptHash(), types.DeriveSha(receipts, trie.NewStackTrie(nil))) +} + func TestUpdateStateDatabaseWhenImportingBlock(t *testing.T) { blocks, chain := setupOracleBackedChain(t, 3) newBlock := createBlock(t, chain) @@ -259,7 +281,7 @@ func TestPrecompileOracle(t *testing.T) { precompileOracle.Results = map[common.Hash]l2test.PrecompileResult{ crypto.Keccak256Hash(arg): {Result: test.result, Ok: true}, } - chain, err := NewOracleBackedL2Chain(logger, oracle, precompileOracle, chainCfg, common.Hash(eth.OutputRoot(&stubOutput))) + chain, err := NewOracleBackedL2Chain(logger, oracle, precompileOracle, chainCfg, common.Hash(eth.OutputRoot(&stubOutput)), memorydb.New()) require.NoError(t, err) newBlock := createBlock(t, chain, WithInput(test.input), WithTargetAddress(test.target)) @@ -288,7 +310,7 @@ func setupOracleBackedChainWithLowerHead(t *testing.T, blockCount int, headBlock head := blocks[headBlockNumber].Hash() stubOutput := eth.OutputV0{BlockHash: head} precompileOracle := l2test.NewStubPrecompileOracle(t) - chain, err := NewOracleBackedL2Chain(logger, oracle, precompileOracle, chainCfg, common.Hash(eth.OutputRoot(&stubOutput))) + chain, err 
:= NewOracleBackedL2Chain(logger, oracle, precompileOracle, chainCfg, common.Hash(eth.OutputRoot(&stubOutput)), memorydb.New()) require.NoError(t, err) return blocks, chain } @@ -378,7 +400,7 @@ func createBlock(t *testing.T, chain *OracleBackedL2Chain, opts ...blockCreateOp require.NoError(t, err) nonce := parentDB.GetNonce(fundedAddress) config := chain.Config() - db := rawdb.NewDatabase(NewOracleBackedDB(chain.oracle, eth.ChainIDFromBig(config.ChainID))) + db := rawdb.NewDatabase(NewOracleBackedDB(memorydb.New(), chain.oracle, eth.ChainIDFromBig(config.ChainID))) blocks, _ := core.GenerateChain(config, parent, chain.Engine(), db, 1, func(i int, gen *core.BlockGen) { rawTx := &types.DynamicFeeTx{ ChainID: config.ChainID, diff --git a/op-program/client/l2/engine_test.go b/op-program/client/l2/engine_test.go index 56ed3a7a16c..372d765032d 100644 --- a/op-program/client/l2/engine_test.go +++ b/op-program/client/l2/engine_test.go @@ -219,6 +219,10 @@ func (s *stubEngineBackend) GetCanonicalHash(n uint64) common.Hash { return s.canonical[n] } +func (s *stubEngineBackend) GetReceiptsByBlockHash(hash common.Hash) types.Receipts { + panic("unsupported") +} + func (s *stubEngineBackend) GetBlock(hash common.Hash, number uint64) *types.Block { panic("unsupported") } diff --git a/op-program/client/l2/engineapi/block_processor.go b/op-program/client/l2/engineapi/block_processor.go index 2126271c2d4..fb2de328f38 100644 --- a/op-program/client/l2/engineapi/block_processor.go +++ b/op-program/client/l2/engineapi/block_processor.go @@ -91,7 +91,7 @@ func NewBlockProcessorFromHeader(provider BlockDataProvider, h *types.Header) (* if vmConfig := provider.GetVMConfig(); vmConfig != nil && vmConfig.PrecompileOverrides != nil { precompileOverrides = vmConfig.PrecompileOverrides } - vmenv := vm.NewEVM(context, vm.TxContext{}, statedb, provider.Config(), vm.Config{PrecompileOverrides: precompileOverrides}) + vmenv := vm.NewEVM(context, statedb, provider.Config(), 
vm.Config{PrecompileOverrides: precompileOverrides}) return vmenv } if h.ParentBeaconRoot != nil { @@ -102,11 +102,11 @@ func NewBlockProcessorFromHeader(provider BlockDataProvider, h *types.Header) (* header.ExcessBlobGas = &zero } vmenv := mkEVM() - core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv, statedb) + core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, vmenv) } if provider.Config().IsPrague(header.Number, header.Time) { vmenv := mkEVM() - core.ProcessParentBlockHash(header.ParentHash, vmenv, statedb) + core.ProcessParentBlockHash(header.ParentHash, vmenv) } if provider.Config().IsIsthmus(header.Time) { // set the header withdrawals root for Isthmus blocks @@ -135,8 +135,12 @@ func (b *BlockProcessor) CheckTxWithinGasLimit(tx *types.Transaction) error { func (b *BlockProcessor) AddTx(tx *types.Transaction) error { txIndex := len(b.transactions) b.state.SetTxContext(tx.Hash(), txIndex) - receipt, err := core.ApplyTransaction(b.dataProvider.Config(), b.dataProvider, &b.header.Coinbase, - b.gasPool, b.state, b.header, tx, &b.header.GasUsed, *b.dataProvider.GetVMConfig()) + + context := core.NewEVMBlockContext(b.header, b.dataProvider, nil, b.dataProvider.Config(), b.state) + vmConfig := *b.dataProvider.GetVMConfig() + // TODO(#14038): reuse evm + evm := vm.NewEVM(context, b.state, b.dataProvider.Config(), vmConfig) + receipt, err := core.ApplyTransaction(evm, b.gasPool, b.state, b.header, tx, &b.header.GasUsed) if err != nil { return fmt.Errorf("failed to apply transaction to L2 block (tx %d): %w", txIndex, err) } @@ -145,16 +149,21 @@ func (b *BlockProcessor) AddTx(tx *types.Transaction) error { return nil } -func (b *BlockProcessor) Assemble() (*types.Block, error) { +func (b *BlockProcessor) Assemble() (*types.Block, types.Receipts, error) { body := types.Body{ Transactions: b.transactions, } - return b.dataProvider.Engine().FinalizeAndAssemble(b.dataProvider, b.header, b.state, &body, b.receipts) + block, err := 
b.dataProvider.Engine().FinalizeAndAssemble(b.dataProvider, b.header, b.state, &body, b.receipts) + if err != nil { + return nil, nil, err + } + return block, b.receipts, nil } func (b *BlockProcessor) Commit() error { - root, err := b.state.Commit(b.header.Number.Uint64(), b.dataProvider.Config().IsEIP158(b.header.Number)) + isCancun := b.dataProvider.Config().IsCancun(b.header.Number, b.header.Time) + root, err := b.state.Commit(b.header.Number.Uint64(), b.dataProvider.Config().IsEIP158(b.header.Number), isCancun) if err != nil { return fmt.Errorf("state write error: %w", err) } diff --git a/op-program/client/l2/engineapi/l2_engine_api.go b/op-program/client/l2/engineapi/l2_engine_api.go index 66e85e8d7ef..053689546f2 100644 --- a/op-program/client/l2/engineapi/l2_engine_api.go +++ b/op-program/client/l2/engineapi/l2_engine_api.go @@ -19,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" @@ -49,6 +50,7 @@ type EngineBackend interface { type CachingEngineBackend interface { EngineBackend + GetReceiptsByBlockHash(hash common.Hash) types.Receipts AssembleAndInsertBlockWithoutSetHead(processor *BlockProcessor) (*types.Block, error) } @@ -197,7 +199,7 @@ func (ea *L2EngineAPI) endBlock() (*types.Block, error) { if cachingBackend, ok := ea.backend.(CachingEngineBackend); ok { block, err = cachingBackend.AssembleAndInsertBlockWithoutSetHead(processor) } else { - block, err = processor.Assemble() + block, _, err = processor.Assemble() } if err != nil { return nil, fmt.Errorf("assemble block: %w", err) @@ -393,7 +395,7 @@ func (ea *L2EngineAPI) forkchoiceUpdated(_ context.Context, state *eth.Forkchoic } ea.log.Info("Forkchoice requested sync to new head", "number", header.Number(), "hash", header.Hash()) - if err 
:= ea.downloader.BeaconSync(downloader.SnapSync, header.Header(), nil); err != nil { + if err := ea.downloader.BeaconSync(ethconfig.SnapSync, header.Header(), nil); err != nil { return STATUS_SYNCING, err } return STATUS_SYNCING, nil diff --git a/op-program/client/l2/engineapi/l2_engine_api_test.go b/op-program/client/l2/engineapi/l2_engine_api_test.go index c5b39c51ce8..fd4eae3a768 100644 --- a/op-program/client/l2/engineapi/l2_engine_api_test.go +++ b/op-program/client/l2/engineapi/l2_engine_api_test.go @@ -111,7 +111,7 @@ type stubCachingBackend struct { } func (s *stubCachingBackend) AssembleAndInsertBlockWithoutSetHead(processor *BlockProcessor) (*types.Block, error) { - block, err := processor.Assemble() + block, _, err := processor.Assemble() if err != nil { return nil, err } @@ -121,4 +121,8 @@ func (s *stubCachingBackend) AssembleAndInsertBlockWithoutSetHead(processor *Blo return block, nil } +func (s *stubCachingBackend) GetReceiptsByBlockHash(hash common.Hash) types.Receipts { + panic("unsupported") +} + var _ CachingEngineBackend = (*stubCachingBackend)(nil) diff --git a/op-program/client/preinterop.go b/op-program/client/preinterop.go index 91b0db42c17..075bd948e32 100644 --- a/op-program/client/preinterop.go +++ b/op-program/client/preinterop.go @@ -10,7 +10,14 @@ import ( "github.com/ethereum/go-ethereum/log" ) -func RunPreInteropProgram(logger log.Logger, bootInfo *boot.BootInfo, l1PreimageOracle *l1.CachingOracle, l2PreimageOracle *l2.CachingOracle) error { +func RunPreInteropProgram( + logger log.Logger, + bootInfo *boot.BootInfo, + l1PreimageOracle *l1.CachingOracle, + l2PreimageOracle *l2.CachingOracle, + db l2.KeyValueStore, + opts tasks.DerivationOptions, +) error { logger.Info("Program Bootstrapped", "bootInfo", bootInfo) result, err := tasks.RunDerivation( logger, @@ -21,6 +28,8 @@ func RunPreInteropProgram(logger log.Logger, bootInfo *boot.BootInfo, l1Preimage bootInfo.L2ClaimBlockNumber, l1PreimageOracle, l2PreimageOracle, + db, + opts, ) 
if err != nil { return err diff --git a/op-program/client/program.go b/op-program/client/program.go index 7ce19f4b7ec..fbe3aedc002 100644 --- a/op-program/client/program.go +++ b/op-program/client/program.go @@ -11,13 +11,17 @@ import ( "github.com/ethereum-optimism/optimism/op-program/client/interop" "github.com/ethereum-optimism/optimism/op-program/client/l1" "github.com/ethereum-optimism/optimism/op-program/client/l2" + "github.com/ethereum-optimism/optimism/op-program/client/tasks" oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/log" ) type Config struct { SkipValidation bool InteropEnabled bool + DB l2.KeyValueStore + StoreBlockData bool } // Main executes the client program in a detached context and exits the current process. @@ -37,6 +41,7 @@ func Main(useInterop bool) { preimageHinter := preimage.ClientHinterChannel() config := Config{ InteropEnabled: useInterop, + DB: memorydb.New(), } if err := RunProgram(logger, preimageOracle, preimageHinter, config); errors.Is(err, claim.ErrClaimNotValid) { log.Error("Claim is invalid", "err", err) @@ -61,6 +66,10 @@ func RunProgram(logger log.Logger, preimageOracle io.ReadWriter, preimageHinter bootInfo := boot.BootstrapInterop(pClient) return interop.RunInteropProgram(logger, bootInfo, l1PreimageOracle, l2PreimageOracle, !cfg.SkipValidation) } + if cfg.DB == nil { + return errors.New("db config is required") + } bootInfo := boot.NewBootstrapClient(pClient).BootInfo() - return RunPreInteropProgram(logger, bootInfo, l1PreimageOracle, l2PreimageOracle) + derivationOptions := tasks.DerivationOptions{StoreBlockData: cfg.StoreBlockData} + return RunPreInteropProgram(logger, bootInfo, l1PreimageOracle, l2PreimageOracle, cfg.DB, derivationOptions) } diff --git a/op-program/client/tasks/deposits_block.go b/op-program/client/tasks/deposits_block.go index b92eaffe236..d082ac4875d 100644 --- 
a/op-program/client/tasks/deposits_block.go +++ b/op-program/client/tasks/deposits_block.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) @@ -32,7 +33,7 @@ func BuildDepositOnlyBlock( l1Oracle l1.Oracle, l2Oracle l2.Oracle, ) (common.Hash, eth.Bytes32, error) { - engineBackend, err := l2.NewOracleBackedL2Chain(logger, l2Oracle, l1Oracle, l2Cfg, common.Hash(agreedL2OutputRoot)) + engineBackend, err := l2.NewOracleBackedL2Chain(logger, l2Oracle, l1Oracle, l2Cfg, common.Hash(agreedL2OutputRoot), memorydb.New()) if err != nil { return common.Hash{}, eth.Bytes32{}, fmt.Errorf("failed to create oracle-backed L2 chain: %w", err) } @@ -69,6 +70,20 @@ func BuildDepositOnlyBlock( if err != nil { return common.Hash{}, eth.Bytes32{}, fmt.Errorf("failed to get payload: %w", err) } + + // Sync the engine's view so we can fetch the latest output root + result, err = l2Source.ForkchoiceUpdate(context.Background(), &eth.ForkchoiceState{ + HeadBlockHash: payload.ExecutionPayload.BlockHash, + SafeBlockHash: payload.ExecutionPayload.BlockHash, + FinalizedBlockHash: payload.ExecutionPayload.BlockHash, + }, nil) + if err != nil { + return common.Hash{}, eth.Bytes32{}, fmt.Errorf("failed to update forkchoice state (no build): %w", err) + } + if result.PayloadStatus.Status != eth.ExecutionValid { + return common.Hash{}, eth.Bytes32{}, fmt.Errorf("failed to update forkchoice state (no build): %w", eth.ForkchoiceUpdateErr(result.PayloadStatus)) + } + + blockHash, outputRoot, err := l2Source.L2OutputRoot(uint64(payload.ExecutionPayload.BlockNumber)) if err != nil { return common.Hash{}, eth.Bytes32{}, fmt.Errorf("failed to get L2 output root: %w", err) @@ -77,7 +92,7 @@ func BuildDepositOnlyBlock( } func getL2Output(logger log.Logger, cfg 
*rollup.Config, l2Cfg *params.ChainConfig, l2Oracle l2.Oracle, l1Oracle l1.Oracle, block *types.Block) (*eth.OutputV0, error) { - backend := l2.NewOracleBackedL2ChainFromHead(logger, l2Oracle, l1Oracle, l2Cfg, block) + backend := l2.NewOracleBackedL2ChainFromHead(logger, l2Oracle, l1Oracle, l2Cfg, block, memorydb.New()) engine := l2.NewOracleEngine(cfg, logger, backend) output, err := engine.L2OutputAtBlockHash(block.Hash()) if err != nil { @@ -118,8 +133,8 @@ func blockToDepositsOnlyAttributes(cfg *rollup.Config, block *types.Block, outpu } if cfg.IsHolocene(block.Time()) { d, e := eip1559.DecodeHoloceneExtraData(block.Extra()) - eip1559Params := eip1559.EncodeHolocene1559Params(d, e) - copy(attrs.EIP1559Params[:], eip1559Params) + eip1559Params := eth.Bytes8(eip1559.EncodeHolocene1559Params(d, e)) + attrs.EIP1559Params = &eip1559Params } return attrs, nil } diff --git a/op-program/client/tasks/derive.go b/op-program/client/tasks/derive.go index 711259b7a5f..46f78ff598f 100644 --- a/op-program/client/tasks/derive.go +++ b/op-program/client/tasks/derive.go @@ -4,13 +4,19 @@ import ( "fmt" "github.com/ethereum-optimism/optimism/op-node/rollup" + preimage "github.com/ethereum-optimism/optimism/op-preimage" cldr "github.com/ethereum-optimism/optimism/op-program/client/driver" "github.com/ethereum-optimism/optimism/op-program/client/l1" "github.com/ethereum-optimism/optimism/op-program/client/l2" + "github.com/ethereum-optimism/optimism/op-program/client/l2/engineapi" + "github.com/ethereum-optimism/optimism/op-program/client/mpt" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" ) type L2Source interface { @@ -23,6 +29,12 @@ type DerivationResult struct { OutputRoot eth.Bytes32 } +type DerivationOptions struct { + 
// StoreBlockData controls whether block data, including intermediate trie nodes from transactions and receipts + // of the derived block should be stored in the l2.KeyValueStore. + StoreBlockData bool +} + // RunDerivation executes the L2 state transition, given a minimal interface to retrieve data. // Returns the L2BlockRef of the safe head reached and the output root at l2ClaimBlockNum or // the final safe head when l1Head is reached if l2ClaimBlockNum is not reached. @@ -37,10 +49,12 @@ func RunDerivation( l2ClaimBlockNum uint64, l1Oracle l1.Oracle, l2Oracle l2.Oracle, + db l2.KeyValueStore, + options DerivationOptions, ) (DerivationResult, error) { l1Source := l1.NewOracleL1Client(logger, l1Oracle, l1Head) l1BlobsSource := l1.NewBlobFetcher(logger, l1Oracle) - engineBackend, err := l2.NewOracleBackedL2Chain(logger, l2Oracle, l1Oracle, l2Cfg, l2OutputRoot) + engineBackend, err := l2.NewOracleBackedL2Chain(logger, l2Oracle, l1Oracle, l2Cfg, l2OutputRoot, db) if err != nil { return DerivationResult{}, fmt.Errorf("failed to create oracle-backed L2 chain: %w", err) } @@ -53,6 +67,13 @@ func RunDerivation( return DerivationResult{}, fmt.Errorf("failed to run program to completion: %w", err) } logger.Info("Derivation complete", "head", result) + + if options.StoreBlockData { + if err := storeBlockData(result, db, engineBackend); err != nil { + return DerivationResult{}, fmt.Errorf("failed to write trie nodes: %w", err) + } + logger.Info("Trie nodes written") + } return loadOutputRoot(l2ClaimBlockNum, result, l2Source) } @@ -67,3 +88,46 @@ func loadOutputRoot(l2ClaimBlockNum uint64, head eth.L2BlockRef, src L2Source) ( OutputRoot: outputRoot, }, nil } + +func storeBlockData(derived eth.L2BlockRef, db l2.KeyValueStore, backend engineapi.CachingEngineBackend) error { + block := backend.GetBlockByHash(derived.Hash) + if block == nil { + return fmt.Errorf("derived block %v is missing", derived.Hash) + } + headerRLP, err := rlp.EncodeToBytes(block.Header()) + if err != nil 
{ + return fmt.Errorf("failed to encode block header: %w", err) + } + blockHashKey := preimage.Keccak256Key(derived.Hash).PreimageKey() + if err := db.Put(blockHashKey[:], headerRLP); err != nil { + return fmt.Errorf("failed to store block header: %w", err) + } + + opaqueTxs, err := eth.EncodeTransactions(block.Transactions()) + if err != nil { + return err + } + if err := storeTrieNodes(opaqueTxs, db); err != nil { + return err + } + receipts := backend.GetReceiptsByBlockHash(block.Hash()) + if receipts == nil { + return fmt.Errorf("receipts for block %v are missing", block.Hash()) + } + opaqueReceipts, err := eth.EncodeReceipts(receipts) + if err != nil { + return err + } + return storeTrieNodes(opaqueReceipts, db) +} + +func storeTrieNodes(values []hexutil.Bytes, db l2.KeyValueStore) error { + _, nodes := mpt.WriteTrie(values) + for _, node := range nodes { + key := preimage.Keccak256Key(crypto.Keccak256Hash(node)).PreimageKey() + if err := db.Put(key[:], node); err != nil { + return fmt.Errorf("failed to store node: %w", err) + } + } + return nil +} diff --git a/op-program/host/cmd/main_test.go b/op-program/host/cmd/main_test.go index a080ec69a02..c50fa7a3885 100644 --- a/op-program/host/cmd/main_test.go +++ b/op-program/host/cmd/main_test.go @@ -251,6 +251,16 @@ func TestL2Head(t *testing.T) { require.Equal(t, common.HexToHash(l2HeadValue), cfg.L2Head) }) + t.Run("NotRequiredForInterop", func(t *testing.T) { + req := requiredArgs() + delete(req, "--l2.head") + delete(req, "--l2.outputroot") + args := append(toArgList(req), "--l2.agreed-prestate", "0x1234") + cfg := configForArgs(t, args) + require.Equal(t, common.Hash{}, cfg.L2Head) + require.True(t, cfg.InteropEnabled) + }) + t.Run("Invalid", func(t *testing.T) { verifyArgsInvalid(t, config.ErrInvalidL2Head.Error(), replaceRequiredArg("--l2.head", "something")) }) diff --git a/op-program/host/common/common.go b/op-program/host/common/common.go index 62f2aa471f8..b7f9cfbb7ad 100644 --- 
a/op-program/host/common/common.go +++ b/op-program/host/common/common.go @@ -11,9 +11,11 @@ import ( preimage "github.com/ethereum-optimism/optimism/op-preimage" cl "github.com/ethereum-optimism/optimism/op-program/client" + "github.com/ethereum-optimism/optimism/op-program/client/l2" "github.com/ethereum-optimism/optimism/op-program/host/config" "github.com/ethereum-optimism/optimism/op-program/host/kvstore" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/log" ) @@ -25,25 +27,47 @@ type PrefetcherCreator func(ctx context.Context, logger log.Logger, kv kvstore.K type programCfg struct { prefetcher PrefetcherCreator skipValidation bool + db l2.KeyValueStore + storeBlockData bool } type ProgramOpt func(c *programCfg) +// WithPrefetcher configures the prefetcher used by the preimage server. func WithPrefetcher(creator PrefetcherCreator) ProgramOpt { return func(c *programCfg) { c.prefetcher = creator } } +// WithSkipValidation controls whether the program will skip validation of the derived block. func WithSkipValidation(skip bool) ProgramOpt { return func(c *programCfg) { c.skipValidation = skip } } +// WithDB sets the backing state database used by the program. +// If not set, the program will use an in-memory database. +func WithDB(db l2.KeyValueStore) ProgramOpt { + return func(c *programCfg) { + c.db = db + } +} + +// WithStoreBlockData controls whether block data, including intermediate trie nodes from transactions and receipts +// of the derived block should be stored in the database. 
+func WithStoreBlockData(store bool) ProgramOpt { + return func(c *programCfg) { + c.storeBlockData = store + } +} + // FaultProofProgram is the programmatic entry-point for the fault proof program func FaultProofProgram(ctx context.Context, logger log.Logger, cfg *config.Config, opts ...ProgramOpt) error { - programConfig := &programCfg{} + programConfig := &programCfg{ + db: memorydb.New(), + } for _, opt := range opts { opt(programConfig) } @@ -115,6 +139,8 @@ func FaultProofProgram(ctx context.Context, logger log.Logger, cfg *config.Confi clientCfg.SkipValidation = true } clientCfg.InteropEnabled = cfg.InteropEnabled + clientCfg.DB = programConfig.db + clientCfg.StoreBlockData = programConfig.storeBlockData return cl.RunProgram(logger, pClientRW, hClientRW, clientCfg) } } diff --git a/op-program/host/common/l2_store.go b/op-program/host/common/l2_store.go new file mode 100644 index 00000000000..c18d668c189 --- /dev/null +++ b/op-program/host/common/l2_store.go @@ -0,0 +1,124 @@ +package common + +import ( + "bytes" + + "github.com/ethereum-optimism/optimism/op-program/client/l2" + "github.com/ethereum-optimism/optimism/op-program/host/kvstore" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" +) + +type l2KeyValueStore struct { + kv kvstore.KV +} + +var _ l2.KeyValueStore = (*l2KeyValueStore)(nil) + +// NewL2KeyValueStore creates a l2.KeyValueStore compatible database that's backed by a [kvstore.KV] +func NewL2KeyValueStore(kv kvstore.KV) *l2KeyValueStore { + return &l2KeyValueStore{kv: kv} +} + +var codePrefixedKeyLength = common.HashLength + len(rawdb.CodePrefix) + +func unwrapKey(key []byte) []byte { + if len(key) == codePrefixedKeyLength && bytes.HasPrefix(key, rawdb.CodePrefix) { + return key[len(rawdb.CodePrefix):] + } + return key +} + +func (db *l2KeyValueStore) Get(key []byte) ([]byte, error) { + key = unwrapKey(key) + if len(key) != common.HashLength { + return nil, 
l2.ErrInvalidKeyLength + } + return db.kv.Get(common.Hash(key)) +} + +func (db *l2KeyValueStore) Has(key []byte) (bool, error) { + key = unwrapKey(key) + if len(key) != common.HashLength { + return false, l2.ErrInvalidKeyLength + } + _, err := db.kv.Get(common.Hash(key)) + switch err { + case kvstore.ErrNotFound: + return false, nil + case nil: + return true, nil + default: + return false, err + } +} + +func (db *l2KeyValueStore) Put(key []byte, value []byte) error { + key = unwrapKey(key) + // For statedb operations, we only expect code and preimage keys of hash length + if len(key) != common.HashLength { + return l2.ErrInvalidKeyLength + } + return db.kv.Put(common.Hash(key), value) +} + +func (db *l2KeyValueStore) NewBatch() ethdb.Batch { + return &batch{db: db} +} + +func (db *l2KeyValueStore) NewBatchWithSize(size int) ethdb.Batch { + return &batch{db: db} +} + +// batch is similar to memorydb.batch, but adapted for kvstore.KV +type batch struct { + db *l2KeyValueStore + writes []keyvalue + size int +} + +var _ ethdb.Batch = (*batch)(nil) + +type keyvalue struct { + key []byte + value []byte +} + +func (b *batch) Put(key []byte, value []byte) error { + b.writes = append(b.writes, keyvalue{common.CopyBytes(key), common.CopyBytes(value)}) + b.size += len(key) + len(value) + return nil +} + +func (b *batch) Delete(key []byte) error { + // ignore deletes + return nil +} + +func (b *batch) ValueSize() int { + return b.size +} + +func (b *batch) Write() error { + for _, keyvalue := range b.writes { + if err := b.db.kv.Put(common.Hash(keyvalue.key), keyvalue.value); err != nil { + return err + } + } + return nil +} + +func (b *batch) Reset() { + b.writes = b.writes[:0] + b.size = 0 +} + +func (b *batch) Replay(w ethdb.KeyValueWriter) error { + for _, keyvalue := range b.writes { + if err := w.Put(keyvalue.key, keyvalue.value); err != nil { + return err + } + } + return nil +} diff --git a/op-program/host/common/l2_store_test.go 
b/op-program/host/common/l2_store_test.go new file mode 100644 index 00000000000..3a31cb6d3c9 --- /dev/null +++ b/op-program/host/common/l2_store_test.go @@ -0,0 +1,175 @@ +package common + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-program/client/l2" + "github.com/ethereum-optimism/optimism/op-program/host/kvstore" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/stretchr/testify/require" +) + +func TestL2KeyValueStore(t *testing.T) { + preimageKey := common.HexToHash("0xdead") + codeKey := make([]byte, common.HashLength+len(rawdb.CodePrefix)) + copy(codeKey, rawdb.CodePrefix) + copy(codeKey[len(rawdb.CodePrefix):], common.Hex2Bytes("0xdead")) + value := []byte("value") + codeValue := []byte("code") + t.Run("Preimage", func(t *testing.T) { + kv := kvstore.NewMemKV() + db := NewL2KeyValueStore(kv) + require.NoError(t, db.Put(preimageKey[:], value)) + + readValue, err := db.Get(preimageKey[:]) + require.NoError(t, err) + require.Equal(t, value, readValue) + kvValue, err := kv.Get(preimageKey) + require.NoError(t, err) + require.Equal(t, value, kvValue) + + has, err := db.Has(preimageKey[:]) + require.NoError(t, err) + require.True(t, has) + }) + t.Run("Code", func(t *testing.T) { + kv := kvstore.NewMemKV() + db := NewL2KeyValueStore(kv) + require.NoError(t, db.Put(codeKey, codeValue)) + + readValue, err := db.Get(codeKey) + require.NoError(t, err) + require.Equal(t, codeValue, readValue) + kvValue, err := kv.Get(common.Hash(codeKey[1:])) + require.NoError(t, err) + require.Equal(t, codeValue, kvValue) + + has, err := db.Has(codeKey) + require.NoError(t, err) + require.True(t, has) + }) + t.Run("InvalidKey", func(t *testing.T) { + kv := kvstore.NewMemKV() + db := NewL2KeyValueStore(kv) + _, err := db.Get([]byte("invalid")) + require.ErrorIs(t, err, l2.ErrInvalidKeyLength) + has, err := db.Has([]byte("invalid")) + require.ErrorIs(t, err, l2.ErrInvalidKeyLength) + require.False(t, has) + 
err = db.Put([]byte("invalid"), []byte("value")) + require.ErrorIs(t, err, l2.ErrInvalidKeyLength) + }) + t.Run("MissingPreimage", func(t *testing.T) { + kv := kvstore.NewMemKV() + db := NewL2KeyValueStore(kv) + has, err := db.Has(preimageKey[:]) + require.NoError(t, err) + require.False(t, has) + }) + t.Run("MissingCode", func(t *testing.T) { + kv := kvstore.NewMemKV() + db := NewL2KeyValueStore(kv) + has, err := db.Has(codeKey) + require.NoError(t, err) + require.False(t, has) + }) + t.Run("Batch", func(t *testing.T) { + kv := &mockKV{data: make(map[common.Hash][]byte)} + db := NewL2KeyValueStore(kv) + batch := db.NewBatch() + require.NoError(t, batch.Put(preimageKey[:], value)) + expectedBatchSize := len(preimageKey) + len(value) + require.Equal(t, expectedBatchSize, batch.ValueSize()) + + require.NoError(t, batch.Put(codeKey, codeValue)) + expectedBatchSize += len(codeKey) + len(codeValue) + require.Equal(t, expectedBatchSize, batch.ValueSize()) + + has, err := db.Has(preimageKey[:]) + require.NoError(t, err) + require.True(t, has) + has, err = db.Has(codeKey) + require.NoError(t, err) + require.True(t, has) + + require.NoError(t, batch.Write()) + + has, err = db.Has(preimageKey[:]) + require.NoError(t, err) + require.True(t, has) + require.Equal(t, 2, kv.puts) + }) + t.Run("Batch-Reset", func(t *testing.T) { + kv := &mockKV{data: make(map[common.Hash][]byte)} + db := NewL2KeyValueStore(kv) + batch := db.NewBatch() + require.NoError(t, batch.Put(preimageKey[:], value)) + require.NoError(t, batch.Put(codeKey, codeValue)) + batch.Reset() + + require.NoError(t, batch.Write()) + require.Zero(t, kv.puts) + + require.Equal(t, 0, batch.ValueSize()) + preimageKey2 := common.HexToHash("0xdead2") + require.NoError(t, batch.Put(preimageKey2[:], value)) + require.NoError(t, batch.Write()) + require.Equal(t, 1, kv.puts) + }) + t.Run("Batch-Replay", func(t *testing.T) { + kv := &mockKV{data: make(map[common.Hash][]byte)} + db := NewL2KeyValueStore(kv) + batch := 
db.NewBatch() + require.NoError(t, batch.Put(preimageKey[:], value)) + require.NoError(t, batch.Put(codeKey, codeValue)) + + writer := &mockWriter{data: make(map[common.Hash][]byte)} + require.NoError(t, batch.Replay(writer)) + require.Zero(t, kv.puts) + require.Zero(t, kv.gets) + require.Equal(t, 2, writer.puts) + require.Equal(t, value, writer.data[preimageKey]) + // this is the raw code key, not the code preimage key + require.Equal(t, codeValue, writer.data[common.Hash(codeKey)]) + }) +} + +type mockKV struct { + puts int + gets int + data map[common.Hash][]byte +} + +func (k *mockKV) Put(key common.Hash, value []byte) error { + k.puts++ + k.data[key] = value + return nil +} + +func (k *mockKV) Get(key common.Hash) ([]byte, error) { + k.gets++ + return k.data[key], nil +} + +func (k *mockKV) Close() error { + return nil +} + +type mockWriter struct { + puts int + deletes int + data map[common.Hash][]byte +} + +func (w *mockWriter) Put(key []byte, value []byte) error { + w.puts++ + w.data[common.Hash(key)] = value + return nil +} + +func (w *mockWriter) Delete(key []byte) error { + w.deletes++ + delete(w.data, common.Hash(key)) + return nil +} diff --git a/op-program/host/config/config.go b/op-program/host/config/config.go index 474c9978726..5962db9cdbc 100644 --- a/op-program/host/config/config.go +++ b/op-program/host/config/config.go @@ -113,7 +113,7 @@ func (c *Config) Check() error { if c.L1Head == (common.Hash{}) { return ErrInvalidL1Head } - if c.L2Head == (common.Hash{}) { + if !c.InteropEnabled && c.L2Head == (common.Hash{}) { return ErrInvalidL2Head } if c.L2OutputRoot == (common.Hash{}) { @@ -240,6 +240,7 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { } var l2OutputRoot common.Hash var agreedPrestate []byte + var interopEnabled bool if ctx.IsSet(flags.L2OutputRoot.Name) { l2OutputRoot = common.HexToHash(ctx.String(flags.L2OutputRoot.Name)) } else if ctx.IsSet(flags.L2AgreedPrestate.Name) { @@ -249,6 +250,7 @@ func 
NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { return nil, ErrInvalidAgreedPrestate } l2OutputRoot = crypto.Keccak256Hash(agreedPrestate) + interopEnabled = true } if l2OutputRoot == (common.Hash{}) { return nil, ErrInvalidL2OutputRoot @@ -346,6 +348,7 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { L1RPCKind: sources.RPCProviderKind(ctx.String(flags.L1RPCProviderKind.Name)), ExecCmd: ctx.String(flags.Exec.Name), ServerMode: ctx.Bool(flags.Server.Name), + InteropEnabled: interopEnabled, }, nil } diff --git a/op-program/host/config/config_test.go b/op-program/host/config/config_test.go index 4cd18a8e4a1..128183a6e55 100644 --- a/op-program/host/config/config_test.go +++ b/op-program/host/config/config_test.go @@ -83,11 +83,20 @@ func TestL1HeadRequired(t *testing.T) { require.ErrorIs(t, err, ErrInvalidL1Head) } -func TestL2HeadRequired(t *testing.T) { - config := validConfig() - config.L2Head = common.Hash{} - err := config.Check() - require.ErrorIs(t, err, ErrInvalidL2Head) +func TestL2Head(t *testing.T) { + t.Run("RequiredPreInterop", func(t *testing.T) { + config := validConfig() + config.L2Head = common.Hash{} + err := config.Check() + require.ErrorIs(t, err, ErrInvalidL2Head) + }) + + t.Run("NotRequiredForInterop", func(t *testing.T) { + config := validInteropConfig() + config.L2Head = common.Hash{} + err := config.Check() + require.NoError(t, err) + }) } func TestL2OutputRootRequired(t *testing.T) { diff --git a/op-program/host/host.go b/op-program/host/host.go index 5f04fb8ccbe..38e2d036222 100644 --- a/op-program/host/host.go +++ b/op-program/host/host.go @@ -5,7 +5,9 @@ import ( "fmt" "github.com/ethereum-optimism/optimism/op-node/chaincfg" + "github.com/ethereum-optimism/optimism/op-node/rollup" preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-program/client/l2" hostcommon "github.com/ethereum-optimism/optimism/op-program/host/common" 
"github.com/ethereum-optimism/optimism/op-program/host/config" "github.com/ethereum-optimism/optimism/op-program/host/flags" @@ -18,6 +20,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" ) type Prefetcher interface { @@ -112,17 +115,54 @@ func (p *programExecutor) RunProgram( prefetcher hostcommon.Prefetcher, blockNum uint64, chainID eth.ChainID, + db l2.KeyValueStore, ) error { newCfg := *p.cfg - newCfg.L2ChainID = chainID + newCfg.ExecCmd = "" // ensure we run the program in the same process newCfg.L2ClaimBlockNumber = blockNum + newCfg.InteropEnabled = false + // Leave the newCfg.L2ChainID as is. It may be set to the customChainID for testing. + // newCfg.L2ChainConfigs and newCfg.Rollups will be reconfigured to the specified chainID for the program execution. + + // Since the ProgramExecutor can be used for interop with custom chain configs, we need to + // restrict the host's chain configuration to a single chain. 
+ var l2ChainConfig *params.ChainConfig + for _, c := range newCfg.L2ChainConfigs { + if eth.ChainIDFromBig(c.ChainID).Cmp(chainID) == 0 { + l2ChainConfig = c + break + } + } + if l2ChainConfig == nil { + return fmt.Errorf("could not find L2 chain config in the host for chain ID %v", chainID) + } + var rollupConfig *rollup.Config + for _, c := range newCfg.Rollups { + if eth.ChainIDFromBig(c.L2ChainID).Cmp(chainID) == 0 { + rollupConfig = c + break + } + } + if rollupConfig == nil { + return fmt.Errorf("could not find rollup config in the host for chain ID %v", chainID) + } + newCfg.L2ChainConfigs = []*params.ChainConfig{l2ChainConfig} + newCfg.Rollups = []*rollup.Config{rollupConfig} withPrefetcher := hostcommon.WithPrefetcher( func(context.Context, log.Logger, kvstore.KV, *config.Config) (hostcommon.Prefetcher, error) { // TODO(#13663): prevent recursive block execution return prefetcher, nil }) - return hostcommon.FaultProofProgram(ctx, p.logger, &newCfg, withPrefetcher, hostcommon.WithSkipValidation(true)) + return hostcommon.FaultProofProgram( + ctx, + p.logger, + &newCfg, + withPrefetcher, + hostcommon.WithSkipValidation(true), + hostcommon.WithDB(db), + hostcommon.WithStoreBlockData(true), + ) } func MakeProgramExecutor(logger log.Logger, cfg *config.Config) prefetcher.ProgramExecutor { diff --git a/op-program/host/prefetcher/prefetcher_test.go b/op-program/host/prefetcher/prefetcher_test.go index 13f93b9734e..42411477907 100644 --- a/op-program/host/prefetcher/prefetcher_test.go +++ b/op-program/host/prefetcher/prefetcher_test.go @@ -997,7 +997,7 @@ type mockExecutor struct { } func (m *mockExecutor) RunProgram( - ctx context.Context, prefetcher hostcommon.Prefetcher, blockNumber uint64, chainID eth.ChainID) error { + ctx context.Context, prefetcher hostcommon.Prefetcher, blockNumber uint64, chainID eth.ChainID, db l2.KeyValueStore) error { m.invoked = true m.blockNumber = blockNumber m.chainID = chainID diff --git a/op-program/host/prefetcher/reexec.go 
b/op-program/host/prefetcher/reexec.go index d335eaae43d..ba47e2a1a88 100644 --- a/op-program/host/prefetcher/reexec.go +++ b/op-program/host/prefetcher/reexec.go @@ -6,6 +6,7 @@ import ( "fmt" preimage "github.com/ethereum-optimism/optimism/op-preimage" + "github.com/ethereum-optimism/optimism/op-program/client/l2" hostcommon "github.com/ethereum-optimism/optimism/op-program/host/common" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/retry" @@ -15,7 +16,7 @@ import ( type ProgramExecutor interface { // RunProgram derives the block at the specified blockNumber - RunProgram(ctx context.Context, prefetcher hostcommon.Prefetcher, blockNumber uint64, chainID eth.ChainID) error + RunProgram(ctx context.Context, prefetcher hostcommon.Prefetcher, blockNumber uint64, chainID eth.ChainID, db l2.KeyValueStore) error } // nativeReExecuteBlock is a helper function that re-executes a block natively. @@ -56,7 +57,7 @@ func (p *Prefetcher) nativeReExecuteBlock( return err } p.logger.Info("Re-executing block", "block_hash", blockHash, "block_number", header.NumberU64()) - if err = p.executor.RunProgram(ctx, p, header.NumberU64()+1, chainID); err != nil { + if err = p.executor.RunProgram(ctx, p, header.NumberU64()+1, chainID, hostcommon.NewL2KeyValueStore(p.kvStore)); err != nil { return err } diff --git a/op-program/prestates/releases.json b/op-program/prestates/releases.json index 15f85f65969..aee12130347 100644 --- a/op-program/prestates/releases.json +++ b/op-program/prestates/releases.json @@ -1,4 +1,13 @@ [ + { + "version": "1.5.0-rc.1", + "hash": "0x03f83792f653160f3274b0888e998077a27e1f74cb35bcb20d86021e769340aa", + "type": "cannon64" + }, + { + "version": "1.5.0-rc.1", + "hash": "0x03dfa3b3ac66e8fae9f338824237ebacff616df928cf7dada0e14be2531bc1f4" + }, { "version": "1.4.1-rc.3", "hash": "0x03d7f817d7bb1321533aeeee5e0f2031cc69d167c4a17bf2816b4cc8b1be4077", diff --git a/op-proposer/proposer/driver.go 
b/op-proposer/proposer/driver.go index 32c94da8b3c..b130d0b82ca 100644 --- a/op-proposer/proposer/driver.go +++ b/op-proposer/proposer/driver.go @@ -6,6 +6,7 @@ import ( "fmt" "math/big" "sync" + "sync/atomic" "time" "github.com/ethereum-optimism/optimism/op-proposer/bindings" @@ -77,8 +78,7 @@ type L2OutputSubmitter struct { ctx context.Context cancel context.CancelFunc - mutex sync.Mutex - running bool + running atomic.Bool l2ooContract L2OOContract l2ooABI *abi.ABI @@ -163,13 +163,9 @@ func newDGFSubmitter(ctx context.Context, cancel context.CancelFunc, setup Drive func (l *L2OutputSubmitter) StartL2OutputSubmitting() error { l.Log.Info("Starting Proposer") - l.mutex.Lock() - defer l.mutex.Unlock() - - if l.running { + if !l.running.CompareAndSwap(false, true) { return errors.New("proposer is already running") } - l.running = true if l.Cfg.WaitNodeSync { err := l.waitNodeSync() @@ -196,13 +192,9 @@ func (l *L2OutputSubmitter) StopL2OutputSubmittingIfRunning() error { func (l *L2OutputSubmitter) StopL2OutputSubmitting() error { l.Log.Info("Stopping Proposer") - l.mutex.Lock() - defer l.mutex.Unlock() - - if !l.running { + if !l.running.CompareAndSwap(true, false) { return ErrProposerNotRunning } - l.running = false l.cancel() close(l.done) diff --git a/op-service/eth/super_root.go b/op-service/eth/super_root.go index 57728f58b4b..e1a5727b579 100644 --- a/op-service/eth/super_root.go +++ b/op-service/eth/super_root.go @@ -148,24 +148,27 @@ func (i *ChainRootInfo) UnmarshalJSON(input []byte) error { } type SuperRootResponse struct { - Timestamp uint64 `json:"timestamp"` - SuperRoot Bytes32 `json:"superRoot"` + CrossSafeDerivedFrom BlockID `json:"crossSafeDerivedFrom"` + Timestamp uint64 `json:"timestamp"` + SuperRoot Bytes32 `json:"superRoot"` // Chains is the list of ChainRootInfo for each chain in the dependency set. // It represents the state of the chain at or before the Timestamp. 
Chains []ChainRootInfo `json:"chains"` } type superRootResponseMarshalling struct { - Timestamp hexutil.Uint64 `json:"timestamp"` - SuperRoot common.Hash `json:"superRoot"` - Chains []ChainRootInfo `json:"chains"` + CrossSafeDerivedFrom BlockID `json:"crossSafeDerivedFrom"` + Timestamp hexutil.Uint64 `json:"timestamp"` + SuperRoot common.Hash `json:"superRoot"` + Chains []ChainRootInfo `json:"chains"` } func (r SuperRootResponse) MarshalJSON() ([]byte, error) { return json.Marshal(&superRootResponseMarshalling{ - Timestamp: hexutil.Uint64(r.Timestamp), - SuperRoot: common.Hash(r.SuperRoot), - Chains: r.Chains, + CrossSafeDerivedFrom: r.CrossSafeDerivedFrom, + Timestamp: hexutil.Uint64(r.Timestamp), + SuperRoot: common.Hash(r.SuperRoot), + Chains: r.Chains, }) } @@ -174,6 +177,7 @@ func (r *SuperRootResponse) UnmarshalJSON(input []byte) error { if err := json.Unmarshal(input, &dec); err != nil { return err } + r.CrossSafeDerivedFrom = dec.CrossSafeDerivedFrom r.Timestamp = uint64(dec.Timestamp) r.SuperRoot = Bytes32(dec.SuperRoot) r.Chains = dec.Chains diff --git a/op-service/sources/engine_client.go b/op-service/sources/engine_client.go index 66b6521065f..53a40414f4c 100644 --- a/op-service/sources/engine_client.go +++ b/op-service/sources/engine_client.go @@ -3,7 +3,6 @@ package sources import ( "context" "fmt" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/eth/catalyst" @@ -49,10 +48,9 @@ func NewEngineClient(client client.RPC, log log.Logger, metrics caching.Metrics, // EngineAPIClient is an RPC client for the Engine API functions. 
type EngineAPIClient struct { - RPC client.RPC - log log.Logger - evp EngineVersionProvider - timeout time.Duration + RPC client.RPC + log log.Logger + evp EngineVersionProvider } type EngineVersionProvider interface { @@ -63,19 +61,9 @@ type EngineVersionProvider interface { func NewEngineAPIClient(rpc client.RPC, l log.Logger, evp EngineVersionProvider) *EngineAPIClient { return &EngineAPIClient{ - RPC: rpc, - log: l, - evp: evp, - timeout: time.Second * 5, - } -} - -func NewEngineAPIClientWithTimeout(rpc client.RPC, l log.Logger, evp EngineVersionProvider, timeout time.Duration) *EngineAPIClient { - return &EngineAPIClient{ - RPC: rpc, - log: l, - evp: evp, - timeout: timeout, + RPC: rpc, + log: l, + evp: evp, } } @@ -90,11 +78,9 @@ func (s *EngineAPIClient) ForkchoiceUpdate(ctx context.Context, fc *eth.Forkchoi llog := s.log.New("state", fc) // local logger tlog := llog.New("attr", attributes) // trace logger tlog.Trace("Sharing forkchoice-updated signal") - fcCtx, cancel := context.WithTimeout(ctx, s.timeout) - defer cancel() var result eth.ForkchoiceUpdatedResult method := s.evp.ForkchoiceUpdatedVersion(attributes) - err := s.RPC.CallContext(fcCtx, &result, string(method), fc, attributes) + err := s.RPC.CallContext(ctx, &result, string(method), fc, attributes) if err != nil { llog.Warn("Failed to share forkchoice-updated signal", "err", err) return nil, err @@ -113,16 +99,14 @@ func (s *EngineAPIClient) NewPayload(ctx context.Context, payload *eth.Execution e := s.log.New("block_hash", payload.BlockHash) e.Trace("sending payload for execution") - execCtx, cancel := context.WithTimeout(ctx, s.timeout) - defer cancel() var result eth.PayloadStatusV1 var err error switch method := s.evp.NewPayloadVersion(uint64(payload.Timestamp)); method { case eth.NewPayloadV3: - err = s.RPC.CallContext(execCtx, &result, string(method), payload, []common.Hash{}, parentBeaconBlockRoot) + err = s.RPC.CallContext(ctx, &result, string(method), payload, []common.Hash{}, 
parentBeaconBlockRoot) case eth.NewPayloadV2: - err = s.RPC.CallContext(execCtx, &result, string(method), payload) + err = s.RPC.CallContext(ctx, &result, string(method), payload) default: return nil, fmt.Errorf("unsupported NewPayload version: %s", method) } diff --git a/op-supervisor/supervisor/backend/backend.go b/op-supervisor/supervisor/backend/backend.go index e45613e2a30..c4bdc550e0f 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/l1access" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/processors" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/rewinder" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/superevents" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/syncnode" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/frontend" @@ -66,6 +67,9 @@ type SupervisorBackend struct { chainMetrics locks.RWMap[eth.ChainID, *chainMetrics] emitter event.Emitter + + // Rewinder for handling reorgs + rewinder *rewinder.Rewinder } var _ event.AttachEmitter = (*SupervisorBackend)(nil) @@ -123,8 +127,11 @@ func NewSupervisorBackend(ctx context.Context, logger log.Logger, eventSys: eventSys, sysCancel: sysCancel, sysContext: sysCtx, + + rewinder: rewinder.New(logger, chainsDBs, l1Accessor), } eventSys.Register("backend", super, event.DefaultRegisterOpts()) + eventSys.Register("rewinder", super.rewinder, event.DefaultRegisterOpts()) // create node controller super.syncNodesController = syncnode.NewSyncNodesController(logger, depSet, eventSys, super) @@ -520,6 +527,9 @@ func (su *SupervisorBackend) SuperRootAtTimestamp(ctx context.Context, timestamp }) chainInfos := make([]eth.ChainRootInfo, len(chains)) superRootChains := 
make([]eth.ChainIDAndOutput, len(chains)) + + var crossSafeDerivedFrom eth.BlockID + for i, chainID := range chains { src, ok := su.syncSources.Get(chainID) if !ok { @@ -541,15 +551,28 @@ func (su *SupervisorBackend) SuperRootAtTimestamp(ctx context.Context, timestamp Pending: pending.Marshal(), } superRootChains[i] = eth.ChainIDAndOutput{ChainID: chainID, Output: canonicalRoot} + + ref, err := src.L2BlockRefByTimestamp(ctx, uint64(timestamp)) + if err != nil { + return eth.SuperRootResponse{}, err + } + derivedFrom, err := su.chainDBs.CrossDerivedFrom(chainID, ref.ID()) + if err != nil { + return eth.SuperRootResponse{}, err + } + if crossSafeDerivedFrom.Number == 0 || crossSafeDerivedFrom.Number < derivedFrom.Number { + crossSafeDerivedFrom = derivedFrom.ID() + } } superRoot := eth.SuperRoot(ð.SuperV1{ Timestamp: uint64(timestamp), Chains: superRootChains, }) return eth.SuperRootResponse{ - Timestamp: uint64(timestamp), - SuperRoot: superRoot, - Chains: chainInfos, + CrossSafeDerivedFrom: crossSafeDerivedFrom, + Timestamp: uint64(timestamp), + SuperRoot: superRoot, + Chains: chainInfos, }, nil } diff --git a/op-supervisor/supervisor/backend/cross/safe_update.go b/op-supervisor/supervisor/backend/cross/safe_update.go index 12fa96edbd7..7f93128f828 100644 --- a/op-supervisor/supervisor/backend/cross/safe_update.go +++ b/op-supervisor/supervisor/backend/cross/safe_update.go @@ -132,11 +132,8 @@ type CrossSafeWorker struct { } func (c *CrossSafeWorker) OnEvent(ev event.Event) bool { - switch x := ev.(type) { + switch ev.(type) { case superevents.UpdateCrossSafeRequestEvent: - if x.ChainID != c.chainID { - return false - } if err := CrossSafeUpdate(c.logger, c.chainID, c.d); err != nil { if errors.Is(err, types.ErrFuture) { c.logger.Debug("Worker awaits additional blocks", "err", err) diff --git a/op-supervisor/supervisor/backend/cross/unsafe_update.go b/op-supervisor/supervisor/backend/cross/unsafe_update.go index 51dafdc193a..095b4e58b56 100644 --- 
a/op-supervisor/supervisor/backend/cross/unsafe_update.go +++ b/op-supervisor/supervisor/backend/cross/unsafe_update.go @@ -79,11 +79,8 @@ type CrossUnsafeWorker struct { } func (c *CrossUnsafeWorker) OnEvent(ev event.Event) bool { - switch x := ev.(type) { + switch ev.(type) { case superevents.UpdateCrossUnsafeRequestEvent: - if x.ChainID != c.chainID { - return false - } if err := CrossUnsafeUpdate(c.logger, c.chainID, c.d); err != nil { if errors.Is(err, types.ErrFuture) { c.logger.Debug("Worker awaits additional blocks", "err", err) diff --git a/op-supervisor/supervisor/backend/db/db.go b/op-supervisor/supervisor/backend/db/db.go index a9a9eaad673..6a424cad346 100644 --- a/op-supervisor/supervisor/backend/db/db.go +++ b/op-supervisor/supervisor/backend/db/db.go @@ -65,7 +65,8 @@ type LocalDerivedFromStorage interface { NextDerived(derived eth.BlockID) (next types.DerivedBlockSealPair, err error) PreviousDerivedFrom(derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) PreviousDerived(derived eth.BlockID) (prevDerived types.BlockSeal, err error) - RewindToL2(derived uint64) error + RewindToScope(scope eth.BlockID) error + RewindToFirstDerived(v eth.BlockID) error } var _ LocalDerivedFromStorage = (*fromda.DB)(nil) diff --git a/op-supervisor/supervisor/backend/db/fromda/db_test.go b/op-supervisor/supervisor/backend/db/fromda/db_test.go index ef4e4e3eb0b..2175dd73afd 100644 --- a/op-supervisor/supervisor/backend/db/fromda/db_test.go +++ b/op-supervisor/supervisor/backend/db/fromda/db_test.go @@ -647,14 +647,15 @@ func testManyEntryDB(t *testing.T, offsetL1 uint64, offsetL2 uint64) { }) } -// TestRewind tests what happens if we rewind -func TestRewind(t *testing.T) { +// TestRewindToScope tests what happens if we rewind based on derived-from scope. 
+func TestRewindToScope(t *testing.T) { l1Block0 := mockL1(0) l1Block1 := mockL1(1) l1Block2 := mockL1(2) l1Block3 := mockL1(3) l1Block4 := mockL1(4) l1Block5 := mockL1(5) + l1Block6 := mockL1(6) l2Block0 := mockL2(0) l2Block1 := mockL2(1) @@ -679,17 +680,17 @@ func TestRewind(t *testing.T) { require.Equal(t, l2Block2, pair.Derived) // Rewind to the future - require.ErrorIs(t, db.RewindToL1(6), types.ErrFuture) + require.ErrorIs(t, db.RewindToScope(l1Block6.ID()), types.ErrFuture) // Rewind to the exact block we're at - require.NoError(t, db.RewindToL1(l1Block5.Number)) + require.NoError(t, db.RewindToScope(l1Block5.ID())) pair, err = db.Latest() require.NoError(t, err) require.Equal(t, l1Block5, pair.DerivedFrom) require.Equal(t, l2Block2, pair.Derived) // Now rewind to L1 block 3 (inclusive). - require.NoError(t, db.RewindToL1(l1Block3.Number)) + require.NoError(t, db.RewindToScope(l1Block3.ID())) // See if we find consistent data pair, err = db.Latest() @@ -698,14 +699,74 @@ func TestRewind(t *testing.T) { require.Equal(t, l2Block1, pair.Derived) // Rewind further to L1 block 1 (inclusive). - require.NoError(t, db.RewindToL1(l1Block1.Number)) + require.NoError(t, db.RewindToScope(l1Block1.ID())) pair, err = db.Latest() require.NoError(t, err) require.Equal(t, l1Block1, pair.DerivedFrom) require.Equal(t, l2Block1, pair.Derived) // Rewind further to L1 block 0 (inclusive). - require.NoError(t, db.RewindToL1(l1Block0.Number)) + require.NoError(t, db.RewindToScope(l1Block0.ID())) + pair, err = db.Latest() + require.NoError(t, err) + require.Equal(t, l1Block0, pair.DerivedFrom) + require.Equal(t, l2Block0, pair.Derived) + }) +} + +// TestRewindToFirstDerived tests what happens if we rewind based on when a block was first derived. 
+func TestRewindToFirstDerived(t *testing.T) { + l1Block0 := mockL1(0) + l1Block1 := mockL1(1) + l1Block2 := mockL1(2) + l1Block3 := mockL1(3) + l1Block4 := mockL1(4) + l1Block5 := mockL1(5) + + l2Block0 := mockL2(0) + l2Block1 := mockL2(1) + l2Block2 := mockL2(2) + l2Block3 := mockL2(3) + + runDBTest(t, func(t *testing.T, db *DB, m *stubMetrics) { + // L2 genesis derived from L1 genesis + require.NoError(t, db.AddDerived(toRef(l1Block0, common.Hash{}), toRef(l2Block0, common.Hash{}))) + // Many L1 blocks all repeating the same L2 block + l2Ref1 := toRef(l2Block1, l2Block0.Hash) + require.NoError(t, db.AddDerived(toRef(l1Block1, l1Block0.Hash), l2Ref1)) + require.NoError(t, db.AddDerived(toRef(l1Block2, l1Block1.Hash), l2Ref1)) + require.NoError(t, db.AddDerived(toRef(l1Block3, l1Block2.Hash), l2Ref1)) + require.NoError(t, db.AddDerived(toRef(l1Block4, l1Block3.Hash), l2Ref1)) + // New L1 block that finally produces a new L2 block + require.NoError(t, db.AddDerived(toRef(l1Block5, l1Block4.Hash), toRef(l2Block2, l2Block1.Hash))) + }, func(t *testing.T, db *DB, m *stubMetrics) { + + pair, err := db.Latest() + require.NoError(t, err) + require.Equal(t, l1Block5, pair.DerivedFrom) + require.Equal(t, l2Block2, pair.Derived) + + // Rewind to the future + require.ErrorIs(t, db.RewindToFirstDerived(l2Block3.ID()), types.ErrFuture) + + // Rewind to the exact block we're at + require.NoError(t, db.RewindToFirstDerived(l2Block2.ID())) + pair, err = db.Latest() + require.NoError(t, err) + require.Equal(t, l1Block5, pair.DerivedFrom) + require.Equal(t, l2Block2, pair.Derived) + + // Now rewind to L2 block 1 + require.NoError(t, db.RewindToFirstDerived(l2Block1.ID())) + + // See if we went back to the first occurrence of L2 block 1. + pair, err = db.Latest() + require.NoError(t, err) + require.Equal(t, l1Block1, pair.DerivedFrom) + require.Equal(t, l2Block1, pair.Derived) + + // Rewind further to L2 block 0 (inclusive). 
+ require.NoError(t, db.RewindToFirstDerived(l2Block0.ID())) pair, err = db.Latest() require.NoError(t, err) require.Equal(t, l1Block0, pair.DerivedFrom) diff --git a/op-supervisor/supervisor/backend/db/fromda/update.go b/op-supervisor/supervisor/backend/db/fromda/update.go index 915b1855fa1..92afe0799eb 100644 --- a/op-supervisor/supervisor/backend/db/fromda/update.go +++ b/op-supervisor/supervisor/backend/db/fromda/update.go @@ -90,13 +90,20 @@ func (db *DB) Rewind(target types.DerivedBlockSealPair, including bool) error { return db.rewindLocked(target, including) } -// RewindToL2 rewinds to the first entry where the L2 block with the given number was derived. -func (db *DB) RewindToL2(derived uint64) error { +// RewindToScope rewinds the DB to the last entry with +// a derivedFrom value matching the given scope (inclusive, scope is retained in DB). +// Note that this drop L1 blocks that resulted in a previously invalidated local-safe block. +// This returns ErrFuture if the block is newer than the last known block. +// This returns ErrConflict if a different block at the given height is known. +func (db *DB) RewindToScope(scope eth.BlockID) error { db.rwLock.Lock() defer db.rwLock.Unlock() - _, link, err := db.firstDerivedFrom(derived) + _, link, err := db.lastDerivedAt(scope.Number) if err != nil { - return fmt.Errorf("failed to find first derived-from %d: %w", derived, err) + return fmt.Errorf("failed to find last derived %d: %w", scope.Number, err) + } + if link.derivedFrom.ID() != scope { + return fmt.Errorf("found derived-from %s but expected %s: %w", link.derivedFrom, scope, types.ErrConflict) } return db.rewindLocked(types.DerivedBlockSealPair{ DerivedFrom: link.derivedFrom, @@ -104,13 +111,17 @@ func (db *DB) RewindToL2(derived uint64) error { }, false) } -// RewindToL1 rewinds to the last entry that was derived from a L1 block with the given block number. 
-func (db *DB) RewindToL1(derivedFrom uint64) error { +// RewindToFirstDerived rewinds to the first time +// when v was derived (inclusive, v is retained in DB). +func (db *DB) RewindToFirstDerived(v eth.BlockID) error { db.rwLock.Lock() defer db.rwLock.Unlock() - _, link, err := db.lastDerivedAt(derivedFrom) + _, link, err := db.firstDerivedFrom(v.Number) if err != nil { - return fmt.Errorf("failed to find last derived %d: %w", derivedFrom, err) + return fmt.Errorf("failed to find when %d was first derived: %w", v.Number, err) + } + if link.derived.ID() != v { + return fmt.Errorf("found derived %s but expected %s: %w", link.derived, v, types.ErrConflict) } return db.rewindLocked(types.DerivedBlockSealPair{ DerivedFrom: link.derivedFrom, diff --git a/op-supervisor/supervisor/backend/db/query.go b/op-supervisor/supervisor/backend/db/query.go index ad5e6572878..4c24f2b9c4a 100644 --- a/op-supervisor/supervisor/backend/db/query.go +++ b/op-supervisor/supervisor/backend/db/query.go @@ -45,14 +45,12 @@ func (db *ChainsDB) LastCommonL1() (types.BlockSeal, error) { if err != nil { return types.BlockSeal{}, fmt.Errorf("failed to determine Last Common L1: %w", err) } - derivedFrom := last.DerivedFrom - commonL1 = derivedFrom // if the common block isn't yet set, // or if the new common block is older than the current common block // set the common block if commonL1 == (types.BlockSeal{}) || - derivedFrom.Number < commonL1.Number { - commonL1 = derivedFrom + last.DerivedFrom.Number < commonL1.Number { + commonL1 = last.DerivedFrom } } return commonL1, nil @@ -200,7 +198,7 @@ func (db *ChainsDB) FinalizedL1() eth.BlockRef { func (db *ChainsDB) Finalized(chainID eth.ChainID) (types.BlockSeal, error) { finalizedL1 := db.finalizedL1.Get() if finalizedL1 == (eth.L1BlockRef{}) { - return types.BlockSeal{}, fmt.Errorf("no finalized L1 signal, cannot determine L2 finality of chain %s yet", chainID) + return types.BlockSeal{}, fmt.Errorf("no finalized L1 signal, cannot determine L2 
finality of chain %s yet: %w", chainID, types.ErrFuture) } // compare the finalized L1 block with the last derived block in the cross DB @@ -224,14 +222,14 @@ func (db *ChainsDB) Finalized(chainID eth.ChainID) (types.BlockSeal, error) { } // otherwise, use the finalized L1 block to determine the final L2 block that was derived from it - derived, err := db.LastDerivedFrom(chainID, finalizedL1.ID()) + derived, err := db.LastCrossDerivedFrom(chainID, finalizedL1.ID()) if err != nil { return types.BlockSeal{}, fmt.Errorf("could not find what was last derived in L2 chain %s from the finalized L1 block %s: %w", chainID, finalizedL1, err) } return derived, nil } -func (db *ChainsDB) LastDerivedFrom(chainID eth.ChainID, derivedFrom eth.BlockID) (derived types.BlockSeal, err error) { +func (db *ChainsDB) LastCrossDerivedFrom(chainID eth.ChainID, derivedFrom eth.BlockID) (derived types.BlockSeal, err error) { crossDB, ok := db.crossDBs.Get(chainID) if !ok { return types.BlockSeal{}, types.ErrUnknownChain diff --git a/op-supervisor/supervisor/backend/db/query_test.go b/op-supervisor/supervisor/backend/db/query_test.go new file mode 100644 index 00000000000..d734a2f8434 --- /dev/null +++ b/op-supervisor/supervisor/backend/db/query_test.go @@ -0,0 +1,169 @@ +package db + +import ( + "fmt" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +type mockDerivedFromStorage struct { + latestFn func() (pair types.DerivedBlockSealPair, err error) +} + +func (m *mockDerivedFromStorage) First() (pair types.DerivedBlockSealPair, err error) { + return types.DerivedBlockSealPair{}, nil +} +func (m *mockDerivedFromStorage) Latest() 
(pair types.DerivedBlockSealPair, err error) { + if m.latestFn != nil { + return m.latestFn() + } + return types.DerivedBlockSealPair{}, nil +} +func (m *mockDerivedFromStorage) Invalidated() (pair types.DerivedBlockSealPair, err error) { + return types.DerivedBlockSealPair{}, nil +} +func (m *mockDerivedFromStorage) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error { + return nil +} +func (m *mockDerivedFromStorage) ReplaceInvalidatedBlock(replacementDerived eth.BlockRef, invalidated common.Hash) (types.DerivedBlockSealPair, error) { + return types.DerivedBlockSealPair{}, nil +} +func (m *mockDerivedFromStorage) RewindAndInvalidate(invalidated types.DerivedBlockRefPair) error { + return nil +} +func (m *mockDerivedFromStorage) LastDerivedAt(derivedFrom eth.BlockID) (derived types.BlockSeal, err error) { + return types.BlockSeal{}, nil +} +func (m *mockDerivedFromStorage) IsDerived(derived eth.BlockID) error { + return nil +} +func (m *mockDerivedFromStorage) DerivedFrom(derived eth.BlockID) (derivedFrom types.BlockSeal, err error) { + return types.BlockSeal{}, nil +} +func (m *mockDerivedFromStorage) FirstAfter(derivedFrom, derived eth.BlockID) (next types.DerivedBlockSealPair, err error) { + return types.DerivedBlockSealPair{}, nil +} +func (m *mockDerivedFromStorage) NextDerivedFrom(derivedFrom eth.BlockID) (nextDerivedFrom types.BlockSeal, err error) { + return types.BlockSeal{}, nil +} +func (m *mockDerivedFromStorage) NextDerived(derived eth.BlockID) (next types.DerivedBlockSealPair, err error) { + return types.DerivedBlockSealPair{}, nil +} +func (m *mockDerivedFromStorage) PreviousDerivedFrom(derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) { + return types.BlockSeal{}, nil +} +func (m *mockDerivedFromStorage) PreviousDerived(derived eth.BlockID) (prevDerived types.BlockSeal, err error) { + return types.BlockSeal{}, nil +} +func (m *mockDerivedFromStorage) RewindToScope(scope eth.BlockID) error { + return nil +} +func (m 
*mockDerivedFromStorage) RewindToFirstDerived(derived eth.BlockID) error { + return nil +} + +func sampleDepSet(t *testing.T) depset.DependencySet { + depSet, err := depset.NewStaticConfigDependencySet( + map[eth.ChainID]*depset.StaticConfigDependency{ + eth.ChainIDFromUInt64(900): { + ChainIndex: 900, + ActivationTime: 42, + HistoryMinTime: 100, + }, + eth.ChainIDFromUInt64(901): { + ChainIndex: 901, + ActivationTime: 30, + HistoryMinTime: 20, + }, + eth.ChainIDFromUInt64(902): { + ChainIndex: 902, + ActivationTime: 30, + HistoryMinTime: 20, + }, + }) + require.NoError(t, err) + return depSet +} + +func TestCommonL1UnknownChain(t *testing.T) { + m1 := &mockDerivedFromStorage{} + m2 := &mockDerivedFromStorage{} + logger := testlog.Logger(t, log.LevelDebug) + chainDB := NewChainsDB(logger, sampleDepSet(t)) + + // add a mock local derived-from storage to drive the test + chainDB.AddLocalDerivedFromDB(eth.ChainIDFromUInt64(900), m1) + chainDB.AddLocalDerivedFromDB(eth.ChainIDFromUInt64(901), m2) + // don't attach a mock for chain 902 + + _, err := chainDB.LastCommonL1() + require.ErrorIs(t, err, types.ErrUnknownChain) +} + +func TestCommonL1(t *testing.T) { + m1 := &mockDerivedFromStorage{} + m2 := &mockDerivedFromStorage{} + m3 := &mockDerivedFromStorage{} + logger := testlog.Logger(t, log.LevelDebug) + chainDB := NewChainsDB(logger, sampleDepSet(t)) + + // add a mock local derived-from storage to drive the test + chainDB.AddLocalDerivedFromDB(eth.ChainIDFromUInt64(900), m1) + chainDB.AddLocalDerivedFromDB(eth.ChainIDFromUInt64(901), m2) + chainDB.AddLocalDerivedFromDB(eth.ChainIDFromUInt64(902), m3) + + // returnN is a helper function which creates a Latest Function for the test + returnN := func(n uint64) func() (pair types.DerivedBlockSealPair, err error) { + return func() (pair types.DerivedBlockSealPair, err error) { + return types.DerivedBlockSealPair{ + DerivedFrom: types.BlockSeal{ + Number: n, + }, + }, nil + } + } + t.Run("pattern 1", func(t *testing.T) { + 
m1.latestFn = returnN(1) + m2.latestFn = returnN(2) + m3.latestFn = returnN(3) + + latest, err := chainDB.LastCommonL1() + require.NoError(t, err) + require.Equal(t, uint64(1), latest.Number) + }) + t.Run("pattern 2", func(t *testing.T) { + m1.latestFn = returnN(3) + m2.latestFn = returnN(2) + m3.latestFn = returnN(1) + + latest, err := chainDB.LastCommonL1() + require.NoError(t, err) + require.Equal(t, uint64(1), latest.Number) + }) + t.Run("pattern 3", func(t *testing.T) { + m1.latestFn = returnN(99) + m2.latestFn = returnN(1) + m3.latestFn = returnN(98) + + latest, err := chainDB.LastCommonL1() + require.NoError(t, err) + require.Equal(t, uint64(1), latest.Number) + }) + t.Run("error", func(t *testing.T) { + m1.latestFn = returnN(99) + m2.latestFn = returnN(1) + m3.latestFn = func() (pair types.DerivedBlockSealPair, err error) { + return types.DerivedBlockSealPair{}, fmt.Errorf("error") + } + latest, err := chainDB.LastCommonL1() + require.Error(t, err) + require.Equal(t, types.BlockSeal{}, latest) + }) +} diff --git a/op-supervisor/supervisor/backend/db/update.go b/op-supervisor/supervisor/backend/db/update.go index 5556b2a0dae..5e3df48a4c3 100644 --- a/op-supervisor/supervisor/backend/db/update.go +++ b/op-supervisor/supervisor/backend/db/update.go @@ -55,7 +55,7 @@ func (db *ChainsDB) Rewind(chain eth.ChainID, headBlock eth.BlockID) error { if !ok { return fmt.Errorf("cannot Rewind (localDB not found): %w: %s", types.ErrUnknownChain, chain) } - if err := localDB.RewindToL2(headBlock.Number); err != nil { + if err := localDB.RewindToFirstDerived(headBlock); err != nil { return fmt.Errorf("failed to rewind localDB to block %v: %w", headBlock, err) } @@ -64,7 +64,7 @@ func (db *ChainsDB) Rewind(chain eth.ChainID, headBlock eth.BlockID) error { if !ok { return fmt.Errorf("cannot Rewind (crossDB not found): %w: %s", types.ErrUnknownChain, chain) } - if err := crossDB.RewindToL2(headBlock.Number); err != nil { + if err := crossDB.RewindToFirstDerived(headBlock); 
err != nil { return fmt.Errorf("failed to rewind crossDB to block %v: %w", headBlock, err) } return nil @@ -196,6 +196,46 @@ func (db *ChainsDB) InvalidateLocalSafe(chainID eth.ChainID, candidate types.Der return nil } +// RewindLocalSafe removes all local-safe blocks after the given new derived-from scope. +// Note that this drop L1 blocks that resulted in a previously invalidated local-safe block. +// This returns ErrFuture if the block is newer than the last known block. +// This returns ErrConflict if a different block at the given height is known. +func (db *ChainsDB) RewindLocalSafe(chainID eth.ChainID, scope eth.BlockID) error { + localSafeDB, ok := db.localDBs.Get(chainID) + if !ok { + return fmt.Errorf("cannot find local-safe DB of chain %s for invalidation: %w", chainID, types.ErrUnknownChain) + } + if err := localSafeDB.RewindToScope(scope); err != nil { + return fmt.Errorf("failed to rewind local-safe: %w", err) + } + return nil +} + +// RewindCrossSafe removes all cross-safe blocks after the given new derived-from scope. +// This returns ErrFuture if the block is newer than the last known block. +// This returns ErrConflict if a different block at the given height is known. 
+func (db *ChainsDB) RewindCrossSafe(chainID eth.ChainID, scope eth.BlockID) error { + crossSafeDB, ok := db.crossDBs.Get(chainID) + if !ok { + return fmt.Errorf("cannot find cross-safe DB of chain %s for invalidation: %w", chainID, types.ErrUnknownChain) + } + if err := crossSafeDB.RewindToScope(scope); err != nil { + return fmt.Errorf("failed to rewind cross-safe: %w", err) + } + return nil +} + +func (db *ChainsDB) RewindLogs(chainID eth.ChainID, newHead types.BlockSeal) error { + eventsDB, ok := db.logDBs.Get(chainID) + if !ok { + return fmt.Errorf("cannot find events DB of chain %s for invalidation: %w", chainID, types.ErrUnknownChain) + } + if err := eventsDB.Rewind(newHead.ID()); err != nil { + return fmt.Errorf("failed to rewind logs of chain %s: %w", chainID, err) + } + return nil +} + func (db *ChainsDB) ResetCrossUnsafeIfNewerThan(chainID eth.ChainID, number uint64) error { crossUnsafe, ok := db.crossUnsafe.Get(chainID) if !ok { diff --git a/op-supervisor/supervisor/backend/l1access/l1_accessor.go b/op-supervisor/supervisor/backend/l1access/l1_accessor.go index 3c9cd12cbd5..9f6b7ef470d 100644 --- a/op-supervisor/supervisor/backend/l1access/l1_accessor.go +++ b/op-supervisor/supervisor/backend/l1access/l1_accessor.go @@ -39,9 +39,9 @@ type L1Accessor struct { finalitySub ethereum.Subscription - // tipHeight is the height of the L1 chain tip - // used to block access to requests more recent than the confirmation depth - tipHeight uint64 + // tip is the L1 chain tip. 
Used to block access to requests more recent than + // the confirmation depth, and to detect reorgs + tip eth.BlockID latestSub ethereum.Subscription confDepth uint64 @@ -159,7 +159,26 @@ func (p *L1Accessor) onFinalized(ctx context.Context, ref eth.L1BlockRef) { } func (p *L1Accessor) onLatest(ctx context.Context, ref eth.L1BlockRef) { - p.tipHeight = ref.Number + // Stop if the block is the same or older than the tip + if ref.Hash == p.tip.Hash { + p.log.Info("Latest L1 block signal is the same as the tip", "ref", ref) + return + } + if ref.Number < p.tip.Number { + p.log.Warn("L1 block is older than the tip", "ref", ref) + return + } + + // If the incoming block is not the child of the current tip, signal a potential reorg + if ref.ParentHash != p.tip.Hash { + p.emitter.Emit(superevents.RewindL1Event{ + IncomingBlock: ref.ID(), + }) + p.log.Info("Reorg detected", "ref", ref) + } + + // Update the tip + p.tip = ref.ID() p.log.Info("Updated latest known L1 block", "ref", ref) } @@ -170,7 +189,7 @@ func (p *L1Accessor) L1BlockRefByNumber(ctx context.Context, number uint64) (eth return eth.L1BlockRef{}, errors.New("no L1 source available") } // block access to requests more recent than the confirmation depth - if number > p.tipHeight-p.confDepth { + if number > p.tip.Number-p.confDepth { return eth.L1BlockRef{}, ethereum.NotFound } return p.client.L1BlockRefByNumber(ctx, number) diff --git a/op-supervisor/supervisor/backend/l1access/l1_accessor_test.go b/op-supervisor/supervisor/backend/l1access/l1_accessor_test.go index 044b7c69eee..bc42f821397 100644 --- a/op-supervisor/supervisor/backend/l1access/l1_accessor_test.go +++ b/op-supervisor/supervisor/backend/l1access/l1_accessor_test.go @@ -5,9 +5,10 @@ import ( "log/slog" "testing" + "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/stretchr/testify/require" ) type mockL1Source struct { @@ -41,14 
+42,14 @@ func TestL1Accessor(t *testing.T) { }, nil } accessor := NewL1Accessor(context.Background(), log, source) - accessor.tipHeight = 10 + accessor.tip = eth.BlockID{Number: 10} // Test L1BlockRefByNumber ref, err := accessor.L1BlockRefByNumber(context.Background(), 5) require.NoError(t, err) require.Equal(t, uint64(5), ref.Number) - // Test L1BlockRefByNumber with number in excess of tipHeight + // Test L1BlockRefByNumber with number in excess of tip height ref, err = accessor.L1BlockRefByNumber(context.Background(), 9) require.Error(t, err) diff --git a/op-supervisor/supervisor/backend/rewinder/rewinder.go b/op-supervisor/supervisor/backend/rewinder/rewinder.go new file mode 100644 index 00000000000..e1ff2849c0b --- /dev/null +++ b/op-supervisor/supervisor/backend/rewinder/rewinder.go @@ -0,0 +1,251 @@ +package rewinder + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-node/rollup/event" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/superevents" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +type l1Node interface { + L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) +} + +type rewinderDB interface { + DependencySet() depset.DependencySet + + LastCrossDerivedFrom(chainID eth.ChainID, derivedFrom eth.BlockID) (derived types.BlockSeal, err error) + PreviousDerivedFrom(chain eth.ChainID, derivedFrom eth.BlockID) (prevDerivedFrom types.BlockSeal, err error) + CrossDerivedFromBlockRef(chainID eth.ChainID, derived eth.BlockID) (derivedFrom eth.BlockRef, err error) + + LocalSafe(eth.ChainID) (types.DerivedBlockSealPair, error) + CrossSafe(eth.ChainID) (types.DerivedBlockSealPair, error) + + RewindLocalSafe(eth.ChainID, eth.BlockID) error + 
RewindCrossSafe(eth.ChainID, eth.BlockID) error + RewindLogs(chainID eth.ChainID, newHead types.BlockSeal) error + + FindSealedBlock(eth.ChainID, uint64) (types.BlockSeal, error) + Finalized(eth.ChainID) (types.BlockSeal, error) + + LocalDerivedFrom(chain eth.ChainID, derived eth.BlockID) (derivedFrom types.BlockSeal, err error) +} + +// Rewinder is responsible for handling the rewinding of databases to the latest common ancestor between +// the local databases and L2 node. +type Rewinder struct { + log log.Logger + emitter event.Emitter + l1Node l1Node + db rewinderDB +} + +func New(log log.Logger, db rewinderDB, l1Node l1Node) *Rewinder { + return &Rewinder{ + log: log.New("component", "rewinder"), + db: db, + l1Node: l1Node, + } +} + +func (r *Rewinder) AttachEmitter(em event.Emitter) { + r.emitter = em +} + +func (r *Rewinder) OnEvent(ev event.Event) bool { + switch x := ev.(type) { + case superevents.RewindL1Event: + r.handleRewindL1Event(x) + return true + case superevents.LocalSafeUpdateEvent: + r.handleLocalDerivedEvent(x) + return true + default: + return false + } +} + +// handleRewindL1Event iterates known chains and checks each one for a reorg +// If a reorg is detected, it will rewind the chain to the latest common ancestor +// between the local-safe head and the finalized head. 
+func (r *Rewinder) handleRewindL1Event(ev superevents.RewindL1Event) { + for _, chainID := range r.db.DependencySet().Chains() { + if err := r.rewindL1ChainIfReorged(chainID, ev.IncomingBlock); err != nil { + r.log.Error("failed to rewind L1 data:", "chain", chainID, "err", err) + } + } +} + +// handleLocalDerivedEvent checks if the newly derived block matches what we have in our unsafe DB +// If it doesn't match, we need to rewind the logs DB to the common ancestor between +// the LocalUnsafe head and the new LocalSafe block +func (r *Rewinder) handleLocalDerivedEvent(ev superevents.LocalSafeUpdateEvent) { + // Get the block at the derived height from our unsafe chain + newSafeHead := ev.NewLocalSafe.Derived + unsafeVersion, err := r.db.FindSealedBlock(ev.ChainID, newSafeHead.Number) + if err != nil { + r.log.Error("failed to get unsafe block at derived height", "chain", ev.ChainID, "height", newSafeHead.Number, "err", err) + return + } + + // If the block hashes match, our unsafe chain is still valid + if unsafeVersion.Hash == newSafeHead.Hash { + return + } + + // Try rewinding the logs DB to the parent of the new safe head + // If it fails with a data conflict walk back through the chain + // until we find a common ancestor or reach the finalized block + finalized, err := r.db.Finalized(ev.ChainID) + if err != nil { + if errors.Is(err, types.ErrFuture) { + finalized = types.BlockSeal{Number: 0} + } else { + r.log.Error("failed to get finalized block", "chain", ev.ChainID, "err", err) + return + } + } + var target types.BlockSeal + for height := int64(newSafeHead.Number - 1); height >= int64(finalized.Number); height-- { + // Get the block at this height + target, err = r.db.FindSealedBlock(ev.ChainID, uint64(height)) + if err != nil { + r.log.Error("failed to get sealed block", "chain", ev.ChainID, "height", height, "err", err) + return + } + + _, err := r.db.LocalDerivedFrom(ev.ChainID, target.ID()) + if err != nil { + if errors.Is(err, types.ErrConflict) || 
errors.Is(err, types.ErrFuture) { + continue + } + + r.log.Error("failed to get derived from block", "chain", ev.ChainID, "block", target.ID(), "err", err) + return + } + + break + } + + // Try to rewind and stop if it succeeds + err = r.db.RewindLogs(ev.ChainID, target) + if err != nil { + r.log.Error("failed to rewind logs DB", "chain", ev.ChainID, "err", err) + return + } + + // Emit event to trigger node reset with new heads + r.emitter.Emit(superevents.ChainRewoundEvent{ChainID: ev.ChainID}) +} + +// rewindL1ChainIfReorged rewinds the L1 chain for the given chain ID if a reorg is detected +// It checks the local-safe head against the canonical L1 block at the same height +func (r *Rewinder) rewindL1ChainIfReorged(chainID eth.ChainID, newTip eth.BlockID) error { + // Get the current LocalSafe head and its L1 block + localSafe, err := r.db.LocalSafe(chainID) + if err != nil { + return fmt.Errorf("failed to get local safe for chain %s: %w", chainID, err) + } + localSafeL1 := localSafe.DerivedFrom + + // Get the canonical L1 block at our local head's height + canonicalL1, err := r.l1Node.L1BlockRefByNumber(context.Background(), localSafeL1.Number) + if err != nil { + return fmt.Errorf("failed to get canonical L1 block at height %d: %w", localSafeL1.Number, err) + } + + // If we're still on the canonical chain, nothing to do + if canonicalL1.Hash == localSafeL1.Hash { + return nil + } + + // Get the finalized block as our lower bound + finalized, err := r.db.Finalized(chainID) + if err != nil { + // If we don't have a finalized block, use the genesis block + if errors.Is(err, types.ErrFuture) { + finalized, err = r.db.FindSealedBlock(chainID, 0) + if err != nil { + return fmt.Errorf("failed to get index 0 block for chain %s: %w", chainID, err) + } + } else { + return fmt.Errorf("failed to get finalized block for chain %s: %w", chainID, err) + } + } + finalizedL1, err := r.db.CrossDerivedFromBlockRef(chainID, finalized.ID()) + if err != nil { + return 
fmt.Errorf("failed to get finalized L1 block for chain %s: %w", chainID, err) + } + + // Find the common ancestor by walking back through L1 blocks + commonL1Ancestor := finalizedL1.ID() + currentL1 := localSafeL1.ID() + for currentL1.Number >= finalizedL1.Number { + // Get the canonical L1 block at this height from the node + remoteL1, err := r.l1Node.L1BlockRefByNumber(context.Background(), currentL1.Number) + if err != nil { + return fmt.Errorf("failed to get L1 block at height %d: %w", currentL1.Number, err) + } + + // If hashes match, we found the common ancestor + if remoteL1.Hash == currentL1.Hash { + commonL1Ancestor = currentL1 + break + } + + // Get the previous L1 block from our DB + prevDerivedFrom, err := r.db.PreviousDerivedFrom(chainID, currentL1) + if err != nil { + // If we hit the first block, use it as common ancestor + if errors.Is(err, types.ErrPreviousToFirst) { + // Still need to verify this block is canonical + remoteFirst, err := r.l1Node.L1BlockRefByNumber(context.Background(), currentL1.Number) + if err != nil { + return fmt.Errorf("failed to get first L1 block: %w", err) + } + if remoteFirst.Hash == currentL1.Hash { + commonL1Ancestor = currentL1 + } else { + // First block isn't canonical, use finalized + commonL1Ancestor = finalizedL1.ID() + } + break + } + return fmt.Errorf("failed to get previous L1 block: %w", err) + } + + // Move to the parent + currentL1 = prevDerivedFrom.ID() + } + + // Rewind LocalSafe to not include data derived from the old L1 chain + if err := r.db.RewindLocalSafe(chainID, commonL1Ancestor); err != nil { + if errors.Is(err, types.ErrFuture) { + r.log.Warn("Rewinding on L1 reorg, but local-safe DB does not have L1 block", "block", commonL1Ancestor, "err", err) + } else { + return fmt.Errorf("failed to rewind local-safe for chain %s: %w", chainID, err) + } + } + + // Rewind CrossSafe to not include data derived from the old L1 chain + if err := r.db.RewindCrossSafe(chainID, commonL1Ancestor); err != nil { + if 
errors.Is(err, types.ErrFuture) { + r.log.Warn("Rewinding on L1 reorg, but cross-safe DB does not have L1 block", "block", commonL1Ancestor, "err", err) + } else { + return fmt.Errorf("failed to rewind cross-safe for chain %s: %w", chainID, err) + } + } + + // Emit rewound event for sync node + r.emitter.Emit(superevents.ChainRewoundEvent{ + ChainID: chainID, + }) + return nil +} diff --git a/op-supervisor/supervisor/backend/rewinder/rewinder_test.go b/op-supervisor/supervisor/backend/rewinder/rewinder_test.go new file mode 100644 index 00000000000..cb59b2e2fa8 --- /dev/null +++ b/op-supervisor/supervisor/backend/rewinder/rewinder_test.go @@ -0,0 +1,1000 @@ +package rewinder + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/rollup/event" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/fromda" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/db/logs" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/superevents" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +// TestRewindL1 tests handling of L1 reorgs by checking that: +// 1. Only safe data is rewound +// 2. Unsafe data remains intact +// 3. 
The rewind point is determined by finding the common L1 ancestor +func TestRewindL1(t *testing.T) { + s := setupTestChain(t) + defer s.Close() + + chainID := eth.ChainID{1} + chain := s.chains[chainID] + + genesis, block1, block2A, block2B := createTestBlocks() + + // Setup sync node with all blocks + chain.setupSyncNodeBlocks(genesis, block1, block2A, block2B) + + // Setup L1 blocks - initially we have block1A and block2A + l1Block0 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa0"), + Number: 0, + Time: 899, + } + l1Block1A := eth.BlockRef{ + Hash: common.HexToHash("0xaaa1"), + Number: 1, + Time: 900, + ParentHash: l1Block0.Hash, + } + l1Block2A := eth.BlockRef{ + Hash: common.HexToHash("0xaaa2"), + Number: 2, + Time: 901, + ParentHash: l1Block1A.Hash, + } + + // Setup the L1 node with initial chain + chain.l1Node.blocks[l1Block0.Number] = l1Block0 + chain.l1Node.blocks[l1Block1A.Number] = l1Block1A + chain.l1Node.blocks[l1Block2A.Number] = l1Block2A + + // Seal genesis and block1 + s.sealBlocks(chainID, genesis, block1) + + // Create rewinder with all dependencies + i := New(s.logger, s.chainsDB, chain.l1Node) + i.AttachEmitter(&mockEmitter{}) + + // Make genesis block derived from l1Block0 and make it safe + s.makeBlockSafe(chainID, genesis, l1Block0, true) + + // Make block1 local-safe and cross-safe using l1Block1A + s.makeBlockSafe(chainID, block1, l1Block1A, true) + + // Add block2A and make it local-safe and cross-safe using l1Block2A + s.sealBlocks(chainID, block2A) + s.makeBlockSafe(chainID, block2A, l1Block2A, true) + + // Verify block2A is the latest sealed block and is cross-safe + s.verifyHeads(chainID, block2A.ID(), "should have set block2A as latest sealed block") + + // Now simulate L1 reorg by replacing l1Block2A with l1Block2B + l1Block2B := eth.BlockRef{ + Hash: common.HexToHash("0xbbb2"), + Number: 2, + Time: 901, + ParentHash: l1Block1A.Hash, + } + chain.l1Node.blocks[l1Block2B.Number] = l1Block2B + + // Trigger L1 reorg + 
i.OnEvent(superevents.RewindL1Event{ + IncomingBlock: l1Block2B.ID(), + }) + + // Verify we rewound to block1 since it's derived from l1Block1A which is still canonical + s.verifyHeads(chainID, block1.ID(), "should have rewound to block1") +} + +// TestRewindL2 tests handling of L2 reorgs via LocalDerivedEvent by checking that: +// 1. Only unsafe data is rewound +// 2. Safe data remains intact +// 3. The rewind point is determined by the parent of the mismatched block +func TestRewindL2(t *testing.T) { + s := setupTestChain(t) + defer s.Close() + + chainID := eth.ChainID{1} + chain := s.chains[chainID] + + genesis, block1, block2A, block2B := createTestBlocks() + + // Setup sync node with all blocks + chain.setupSyncNodeBlocks(genesis, block1, block2A, block2B) + + // Setup L1 blocks + l1Genesis := eth.BlockRef{ + Hash: common.HexToHash("0xaaa0"), + Number: 0, + Time: 899, + } + l1Block1 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa1"), + Number: 1, + Time: 900, + ParentHash: l1Genesis.Hash, + } + l1Block2 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa2"), + Number: 2, + Time: 901, + ParentHash: l1Block1.Hash, + } + chain.l1Node.blocks[l1Genesis.Number] = l1Genesis + chain.l1Node.blocks[l1Block1.Number] = l1Block1 + chain.l1Node.blocks[l1Block2.Number] = l1Block2 + + // Seal genesis and block1 + s.sealBlocks(chainID, genesis, block1) + + // Make genesis safe and derived from L1 genesis + s.makeBlockSafe(chainID, genesis, l1Genesis, true) + + // Make block1 local-safe and cross-safe + s.makeBlockSafe(chainID, block1, l1Block1, true) + + // Add block2A to unsafe chain + s.sealBlocks(chainID, block2A) + + // Verify block2A is the latest sealed block but not safe + s.verifyLogsHead(chainID, block2A.ID(), "should have set block2A as latest sealed block") + s.verifyLocalSafe(chainID, block1.ID(), "block1 should still be local-safe") + s.verifyCrossSafe(chainID, block1.ID(), "block1 should be cross-safe") + + // Create rewinder with all dependencies + i := 
New(s.logger, s.chainsDB, chain.l1Node) + i.AttachEmitter(&mockEmitter{}) + + // Simulate receiving a LocalDerivedDoneEvent for block2B + i.OnEvent(superevents.LocalSafeUpdateEvent{ + ChainID: chainID, + NewLocalSafe: types.DerivedBlockSealPair{ + DerivedFrom: types.BlockSeal{ + Hash: l1Block1.Hash, + Number: l1Block1.Number, + }, + Derived: types.BlockSeal{ + Hash: block2B.Hash, + Number: block2B.Number, + }, + }, + }) + + // Verify we rewound to block1 since block2B doesn't match our unsafe block2A + s.verifyLogsHead(chainID, block1.ID(), "should have rewound to block1") + s.verifyLocalSafe(chainID, block1.ID(), "block1 should still be local-safe") + s.verifyCrossSafe(chainID, block1.ID(), "block1 should still be cross-safe") + + // Add block2B + s.sealBlocks(chainID, block2B) + + // Verify we're now on the new chain + s.verifyLogsHead(chainID, block2B.ID(), "should be on block2B") +} + +// TestNoRewindNeeded tests that no rewind occurs when: +// 1. L1 blocks match during L1 reorg check +// 2. 
L2 blocks match during LocalDerived check +func TestNoRewindNeeded(t *testing.T) { + s := setupTestChain(t) + defer s.Close() + + chainID := eth.ChainID{1} + chain := s.chains[chainID] + + genesis, block1, block2A, _ := createTestBlocks() + + // Setup sync node with blocks + chain.setupSyncNodeBlocks(genesis, block1, block2A) + + // Setup L1 blocks + l1Block1 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa1"), + Number: 1, + Time: 1001, + ParentHash: common.HexToHash("0xaaa0"), + } + l1Block2 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa2"), + Number: 2, + Time: 1002, + ParentHash: l1Block1.Hash, + } + chain.l1Node.blocks[l1Block1.Number] = l1Block1 + chain.l1Node.blocks[l1Block2.Number] = l1Block2 + + // Seal genesis and block1 + s.sealBlocks(chainID, genesis, block1) + + // Make genesis safe and derived from L1 genesis + s.makeBlockSafe(chainID, genesis, eth.BlockRef{ + Hash: common.HexToHash("0xaaa0"), + Number: 0, + Time: 1000, + }, true) + + // Set genesis L1 block as finalized + s.chainsDB.OnEvent(superevents.FinalizedL1RequestEvent{ + FinalizedL1: eth.BlockRef{ + Hash: common.HexToHash("0xaaa0"), + Number: 0, + Time: 1000, + }, + }) + + // Make block1 local-safe and cross-safe + s.makeBlockSafe(chainID, block1, l1Block1, true) + + // Add block2A and make it local-safe and cross-safe + s.sealBlocks(chainID, block2A) + s.makeBlockSafe(chainID, block2A, l1Block2, true) + + // Create rewinder with all dependencies + i := New(s.logger, s.chainsDB, chain.l1Node) + i.AttachEmitter(&mockEmitter{}) + + // Trigger L1 reorg check with same L1 block - should not rewind + i.OnEvent(superevents.RewindL1Event{ + IncomingBlock: l1Block2.ID(), + }) + + // Verify no rewind occurred + s.verifyLogsHead(chainID, block2A.ID(), "should still be on block2A") + s.verifyCrossSafe(chainID, block2A.ID(), "block2A should still be cross-safe") + + // Trigger LocalDerived check with same L2 block - should not rewind + i.OnEvent(superevents.LocalSafeUpdateEvent{ + ChainID: chainID, + 
NewLocalSafe: types.DerivedBlockSealPair{ + DerivedFrom: types.BlockSeal{ + Hash: l1Block2.Hash, + Number: l1Block2.Number, + }, + Derived: types.BlockSeal{ + Hash: block2A.Hash, + Number: block2A.Number, + }, + }, + }) + + // Verify no rewind occurred + s.verifyLogsHead(chainID, block2A.ID(), "should still be on block2A") + s.verifyCrossSafe(chainID, block2A.ID(), "block2A should still be cross-safe") +} + +// TestRewindLongChain syncs a long chain and rewinds many blocks. +func TestRewindLongChain(t *testing.T) { + s := setupTestChain(t) + defer s.Close() + + chainID := eth.ChainID{1} + chain := s.chains[chainID] + + // Create a chain with blocks 0-100 + var blocks []eth.L2BlockRef + var l1Blocks []eth.BlockRef + + // Create L1 blocks first (one per 10 L2 blocks) + for i := uint64(0); i <= 10; i++ { + l1Block := eth.BlockRef{ + Hash: common.HexToHash(fmt.Sprintf("0xaaa%d", i)), + Number: i, + Time: 900 + i*12, + } + if i > 0 { + l1Block.ParentHash = l1Blocks[i-1].Hash + } + l1Blocks = append(l1Blocks, l1Block) + chain.l1Node.blocks[i] = l1Block + } + + // Create L2 blocks 0-100 + for i := uint64(0); i <= 100; i++ { + l1Index := i / 10 + block := eth.L2BlockRef{ + Hash: common.HexToHash(fmt.Sprintf("0x%d", i)), + Number: i, + Time: 1000 + i, + L1Origin: l1Blocks[l1Index].ID(), + SequenceNumber: i % 10, + } + if i > 0 { + block.ParentHash = blocks[i-1].Hash + } + blocks = append(blocks, block) + } + + // Setup sync node with all blocks + chain.setupSyncNodeBlocks(blocks...) 
+ + // Seal all blocks + for _, block := range blocks { + s.sealBlocks(chainID, block) + } + + // Make genesis safe and derived from L1 genesis + s.makeBlockSafe(chainID, blocks[0], l1Blocks[0], true) + + // Set genesis L1 block as finalized + s.chainsDB.OnEvent(superevents.FinalizedL1RequestEvent{ + FinalizedL1: l1Blocks[0], + }) + + // Make blocks up to 95 safe + for i := uint64(1); i <= 95; i++ { + l1Index := i / 10 + s.makeBlockSafe(chainID, blocks[i], l1Blocks[l1Index], true) + } + + // Create rewinder with all dependencies + i := New(s.logger, s.chainsDB, chain.l1Node) + i.AttachEmitter(&mockEmitter{}) + + // Create a divergent block96B + block96B := eth.L2BlockRef{ + Hash: common.HexToHash("0xdead96"), + Number: 96, + ParentHash: blocks[95].Hash, + Time: 1000 + 96, + L1Origin: blocks[96].L1Origin, + SequenceNumber: 96 % 10, + } + + // Trigger LocalDerived event with block96B + i.OnEvent(superevents.LocalSafeUpdateEvent{ + ChainID: chainID, + NewLocalSafe: types.DerivedBlockSealPair{ + DerivedFrom: types.BlockSeal{ + Hash: l1Blocks[96/10].Hash, + Number: l1Blocks[96/10].Number, + }, + Derived: types.BlockSeal{ + Hash: block96B.Hash, + Number: block96B.Number, + }, + }, + }) + + // Verify we rewound to block 95 + s.verifyLogsHead(chainID, blocks[95].ID(), "should have rewound to block 95") +} + +// TestRewindMultiChain syncs two chains and rewinds both +func TestRewindMultiChain(t *testing.T) { + chain1ID := eth.ChainID{1} + chain2ID := eth.ChainID{2} + s := setupTestChains(t, chain1ID, chain2ID) + defer s.Close() + + // Create common blocks for both chains + genesis, block1, block2A, block2B := createTestBlocks() + + // Setup L1 block + l1Genesis := eth.BlockRef{ + Hash: common.HexToHash("0xaaa0"), + Number: 0, + Time: 899, + } + l1Block1 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa1"), + Number: 1, + Time: 900, + ParentHash: l1Genesis.Hash, + } + + // Setup both chains + for chainID, chain := range s.chains { + // Setup nodes + 
chain.setupSyncNodeBlocks(genesis, block1, block2A, block2B) + chain.l1Node.blocks[l1Genesis.Number] = l1Genesis + chain.l1Node.blocks[l1Block1.Number] = l1Block1 + + // Setup initial chain + s.sealBlocks(chainID, genesis, block1, block2A) + + // Make genesis safe and derived from L1 genesis + s.makeBlockSafe(chainID, genesis, l1Genesis, true) + + // Make block1 local-safe and cross-safe + s.makeBlockSafe(chainID, block1, l1Block1, true) + } + + // Set genesis as finalized for all chains + s.chainsDB.OnEvent(superevents.FinalizedL1RequestEvent{ + FinalizedL1: l1Genesis, + }) + + // Create rewinder with all dependencies + i := New(s.logger, s.chainsDB, s.chains[chain1ID].l1Node) + i.AttachEmitter(&mockEmitter{}) + + // Trigger LocalDerived events for both chains + for chainID := range s.chains { + i.OnEvent(superevents.LocalSafeUpdateEvent{ + ChainID: chainID, + NewLocalSafe: types.DerivedBlockSealPair{ + DerivedFrom: types.BlockSeal{ + Hash: l1Block1.Hash, + Number: l1Block1.Number, + }, + Derived: types.BlockSeal{ + Hash: block2B.Hash, + Number: block2B.Number, + }, + }, + }) + } + + // Verify both chains rewound to block1 and maintained proper state + for chainID := range s.chains { + s.verifyLogsHead(chainID, block1.ID(), fmt.Sprintf("chain %v should have rewound to block1", chainID)) + s.verifyCrossSafe(chainID, block1.ID(), fmt.Sprintf("chain %v block1 should be cross-safe", chainID)) + } +} + +// TestRewindL2WalkBack tests that during an L2 reorg, we correctly walk back +// parent-by-parent until finding a common ancestor when the first rewind attempt fails. 
+func TestRewindL2WalkBack(t *testing.T) { + s := setupTestChain(t) + defer s.Close() + chainID := eth.ChainID{1} + chain := s.chains[chainID] + // Create a chain of blocks: genesis -> block1 -> block2 -> block3 -> block4A + genesis := eth.L2BlockRef{ + Hash: common.HexToHash("0x1110"), + Number: 0, + ParentHash: common.Hash{}, + Time: 1000, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xaaa0"), Number: 0}, + SequenceNumber: 0, + } + block1 := eth.L2BlockRef{ + Hash: common.HexToHash("0x1111"), + Number: 1, + ParentHash: genesis.Hash, + Time: 1001, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xaaa1"), Number: 1}, + SequenceNumber: 1, + } + block2 := eth.L2BlockRef{ + Hash: common.HexToHash("0x1112"), + Number: 2, + ParentHash: block1.Hash, + Time: 1002, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xaaa2"), Number: 2}, + SequenceNumber: 2, + } + block3 := eth.L2BlockRef{ + Hash: common.HexToHash("0x1113"), + Number: 3, + ParentHash: block2.Hash, + Time: 1003, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xaaa3"), Number: 3}, + SequenceNumber: 3, + } + block4A := eth.L2BlockRef{ + Hash: common.HexToHash("0x1114a"), + Number: 4, + ParentHash: block3.Hash, + Time: 1004, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xaaa4"), Number: 4}, + SequenceNumber: 4, + } + // Create a divergent block4B that will trigger the reorg + block4B := eth.L2BlockRef{ + Hash: common.HexToHash("0x1114b"), + Number: 4, + ParentHash: block3.Hash, + Time: 1004, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xaaa4"), Number: 4}, + SequenceNumber: 4, + } + // Setup sync node with all blocks + chain.setupSyncNodeBlocks(genesis, block1, block2, block3, block4A, block4B) + // Setup L1 blocks + l1Genesis := eth.BlockRef{ + Hash: common.HexToHash("0xaaa0"), + Number: 0, + Time: 900, + } + l1Block1 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa1"), + Number: 1, + Time: 901, + ParentHash: l1Genesis.Hash, + } + l1Block2 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa2"), + Number: 
2, + Time: 902, + ParentHash: l1Block1.Hash, + } + l1Block3 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa3"), + Number: 3, + Time: 903, + ParentHash: l1Block2.Hash, + } + l1Block4 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa4"), + Number: 4, + Time: 904, + ParentHash: l1Block3.Hash, + } + // Add L1 blocks to node + chain.l1Node.blocks[l1Genesis.Number] = l1Genesis + chain.l1Node.blocks[l1Block1.Number] = l1Block1 + chain.l1Node.blocks[l1Block2.Number] = l1Block2 + chain.l1Node.blocks[l1Block3.Number] = l1Block3 + chain.l1Node.blocks[l1Block4.Number] = l1Block4 + + // Seal all blocks in the original chain + s.sealBlocks(chainID, genesis, block1, block2, block3, block4A) + + // Make genesis safe and derived from L1 genesis + s.makeBlockSafe(chainID, genesis, l1Genesis, true) + + // Set genesis L1 block as finalized + s.chainsDB.OnEvent(superevents.FinalizedL1RequestEvent{ + FinalizedL1: l1Genesis, + }) + + // Make blocks up to block3 safe + s.makeBlockSafe(chainID, block1, l1Block1, true) + s.makeBlockSafe(chainID, block2, l1Block2, true) + s.makeBlockSafe(chainID, block3, l1Block3, true) + + // Create rewinder with all dependencies + i := New(s.logger, s.chainsDB, chain.l1Node) + i.AttachEmitter(&mockEmitter{}) + // Trigger LocalDerived event with block4B + i.OnEvent(superevents.LocalSafeUpdateEvent{ + ChainID: chainID, + NewLocalSafe: types.DerivedBlockSealPair{ + DerivedFrom: types.BlockSeal{ + Hash: block4B.L1Origin.Hash, + Number: block4B.L1Origin.Number, + }, + Derived: types.BlockSeal{ + Hash: block4B.Hash, + Number: block4B.Number, + }, + }, + }) + // Verify we rewound to block3 since it's the common ancestor + s.verifyLogsHead(chainID, block3.ID(), "should have rewound to block3 (common ancestor)") +} + +// TestRewindL1PastCrossSafe tests that when an L1 reorg occurs at a height higher than +// the CrossSafe head, only LocalSafe is rewound and CrossSafe remains untouched. 
+func TestRewindL1PastCrossSafe(t *testing.T) { + s := setupTestChain(t) + defer s.Close() + + chainID := eth.ChainID{1} + chain := s.chains[chainID] + + // Create blocks: genesis -> block1 -> block2 -> block3A/3B + genesis := eth.L2BlockRef{ + Hash: common.HexToHash("0x1110"), + Number: 0, + ParentHash: common.Hash{}, + Time: 1000, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xaaa0"), Number: 0}, + SequenceNumber: 0, + } + block1 := eth.L2BlockRef{ + Hash: common.HexToHash("0x1111"), + Number: 1, + ParentHash: genesis.Hash, + Time: 1001, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xaaa1"), Number: 1}, + SequenceNumber: 1, + } + block2 := eth.L2BlockRef{ + Hash: common.HexToHash("0x1112"), + Number: 2, + ParentHash: block1.Hash, + Time: 1002, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xaaa2"), Number: 2}, + SequenceNumber: 2, + } + block3A := eth.L2BlockRef{ + Hash: common.HexToHash("0x1113a"), + Number: 3, + ParentHash: block2.Hash, + Time: 1003, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xaaa3"), Number: 3}, + SequenceNumber: 3, + } + block3B := eth.L2BlockRef{ + Hash: common.HexToHash("0x1113b"), + Number: 3, + ParentHash: block2.Hash, + Time: 1003, + L1Origin: eth.BlockID{Hash: common.HexToHash("0xbbb3"), Number: 3}, + SequenceNumber: 3, + } + + // Setup sync node with all blocks + chain.setupSyncNodeBlocks(genesis, block1, block2, block3A, block3B) + + // Setup L1 blocks - initially we have the A chain + l1Genesis := eth.BlockRef{ + Hash: common.HexToHash("0xaaa0"), + Number: 0, + Time: 899, + } + l1Block1 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa1"), + Number: 1, + Time: 900, + ParentHash: l1Genesis.Hash, + } + l1Block2 := eth.BlockRef{ + Hash: common.HexToHash("0xaaa2"), + Number: 2, + Time: 901, + ParentHash: l1Block1.Hash, + } + l1Block3A := eth.BlockRef{ + Hash: common.HexToHash("0xaaa3"), + Number: 3, + Time: 902, + ParentHash: l1Block2.Hash, + } + + // Setup the L1 node with initial chain + 
chain.l1Node.blocks[l1Genesis.Number] = l1Genesis + chain.l1Node.blocks[l1Block1.Number] = l1Block1 + chain.l1Node.blocks[l1Block2.Number] = l1Block2 + chain.l1Node.blocks[l1Block3A.Number] = l1Block3A + + // Seal all blocks + s.sealBlocks(chainID, genesis, block1, block2, block3A) + + // Create rewinder with all dependencies + i := New(s.logger, s.chainsDB, chain.l1Node) + i.AttachEmitter(&mockEmitter{}) + + // Make genesis block derived from l1Genesis and make it safe + s.makeBlockSafe(chainID, genesis, l1Genesis, true) + + // Set l1Genesis as finalized + s.chainsDB.OnEvent(superevents.FinalizedL1RequestEvent{ + FinalizedL1: l1Genesis, + }) + + // Make block1 local-safe and cross-safe + s.makeBlockSafe(chainID, block1, l1Block1, true) + + // Make block2 local-safe and cross-safe + s.makeBlockSafe(chainID, block2, l1Block2, true) + + // Make block3A only local-safe (not cross-safe) + s.makeBlockSafe(chainID, block3A, l1Block3A, false) + + // Verify initial state + s.verifyLogsHead(chainID, block3A.ID(), "should have set block3A as latest sealed block") + s.verifyCrossSafe(chainID, block2.ID(), "block2 should be cross-safe") + + // Now simulate L1 reorg by replacing l1Block3A with l1Block3B + l1Block3B := eth.BlockRef{ + Hash: common.HexToHash("0xbbb3"), + Number: 3, + Time: 902, + ParentHash: l1Block2.Hash, + } + chain.l1Node.blocks[l1Block3B.Number] = l1Block3B + + // Trigger L1 reorg + i.OnEvent(superevents.RewindL1Event{ + IncomingBlock: l1Block3B.ID(), + }) + + // Verify we rewound LocalSafe to block2 since it's derived from l1Block2 which is still canonical + s.verifyHeads(chainID, block2.ID(), "should have rewound to block2") +} + +type testSetup struct { + t *testing.T + logger log.Logger + dataDir string + chainsDB *db.ChainsDB + chains map[eth.ChainID]*testChainSetup +} + +type testChainSetup struct { + chainID eth.ChainID + logDB *logs.DB + localDB *fromda.DB + crossDB *fromda.DB + syncNode *mockSyncNode + l1Node *mockL1Node +} + +// setupTestChains 
creates multiple test chains with their own DBs and sync nodes +func setupTestChains(t *testing.T, chainIDs ...eth.ChainID) *testSetup { + logger := testlog.Logger(t, log.LvlInfo) + dataDir := t.TempDir() + + // Create dependency set for all chains + deps := make(map[eth.ChainID]*depset.StaticConfigDependency) + for i, chainID := range chainIDs { + deps[chainID] = &depset.StaticConfigDependency{ + ChainIndex: types.ChainIndex(i + 1), + ActivationTime: 42, + HistoryMinTime: 100, + } + } + depSet, err := depset.NewStaticConfigDependencySet(deps) + require.NoError(t, err) + + // Create ChainsDB with mock emitter + chainsDB := db.NewChainsDB(logger, depSet) + chainsDB.AttachEmitter(&mockEmitter{}) + + setup := &testSetup{ + t: t, + logger: logger, + dataDir: dataDir, + chainsDB: chainsDB, + chains: make(map[eth.ChainID]*testChainSetup), + } + + // Setup each chain + for _, chainID := range chainIDs { + // Create the chain directory + chainDir := filepath.Join(dataDir, fmt.Sprintf("00%d", chainID[0]), "1") + err = os.MkdirAll(chainDir, 0o755) + require.NoError(t, err) + + // Create and open the log DB + logDB, err := logs.NewFromFile(logger, &stubMetrics{}, filepath.Join(chainDir, "log.db"), true) + require.NoError(t, err) + chainsDB.AddLogDB(chainID, logDB) + + // Create and open the local derived-from DB + localDB, err := fromda.NewFromFile(logger, &stubMetrics{}, filepath.Join(chainDir, "local_safe.db")) + require.NoError(t, err) + chainsDB.AddLocalDerivedFromDB(chainID, localDB) + + // Create and open the cross derived-from DB + crossDB, err := fromda.NewFromFile(logger, &stubMetrics{}, filepath.Join(chainDir, "cross_safe.db")) + require.NoError(t, err) + chainsDB.AddCrossDerivedFromDB(chainID, crossDB) + + // Add cross-unsafe tracker + chainsDB.AddCrossUnsafeTracker(chainID) + + setup.chains[chainID] = &testChainSetup{ + chainID: chainID, + logDB: logDB, + localDB: localDB, + crossDB: crossDB, + syncNode: newMockSyncNode(), + l1Node: newMockL1Node(), + } + } + + 
return setup +} + +func (s *testSetup) Close() { + s.chainsDB.Close() + for _, chain := range s.chains { + chain.Close() + } +} + +func (s *testChainSetup) Close() { + s.logDB.Close() + s.localDB.Close() + s.crossDB.Close() +} + +// setupSyncNodeBlocks adds the given blocks to the sync node's block map +func (s *testChainSetup) setupSyncNodeBlocks(blocks ...eth.L2BlockRef) { + for _, block := range blocks { + s.syncNode.blocks[block.Number] = eth.BlockRef{ + Hash: block.Hash, + Number: block.Number, + Time: block.Time, + ParentHash: block.ParentHash, + } + } +} + +func (s *testSetup) makeBlockSafe(chainID eth.ChainID, block eth.L2BlockRef, l1Block eth.BlockRef, makeCrossSafe bool) { + // Add the L1 derivation + s.chainsDB.UpdateLocalSafe(chainID, l1Block, eth.BlockRef{ + Hash: block.Hash, + Number: block.Number, + Time: block.Time, + ParentHash: block.ParentHash, + }) + + if makeCrossSafe { + require.NoError(s.t, s.chainsDB.UpdateCrossUnsafe(chainID, types.BlockSeal{ + Hash: block.Hash, + Number: block.Number, + Timestamp: block.Time, + })) + require.NoError(s.t, s.chainsDB.UpdateCrossSafe(chainID, l1Block, eth.BlockRef{ + Hash: block.Hash, + Number: block.Number, + Time: block.Time, + ParentHash: block.ParentHash, + })) + } +} + +func (s *testSetup) verifyHeads(chainID eth.ChainID, expectedHead eth.BlockID, msg string) { + s.verifyLocalSafe(chainID, expectedHead, msg) + s.verifyCrossSafe(chainID, expectedHead, msg) +} + +func (s *testSetup) verifyLocalSafe(chainID eth.ChainID, expectedHead eth.BlockID, msg string) { + localSafe, err := s.chainsDB.LocalSafe(chainID) + require.NoError(s.t, err) + require.Equal(s.t, expectedHead.Hash, localSafe.Derived.Hash, msg) +} + +func (s *testSetup) verifyCrossSafe(chainID eth.ChainID, expectedHead eth.BlockID, msg string) { + crossSafe, err := s.chainsDB.CrossSafe(chainID) + require.NoError(s.t, err) + require.Equal(s.t, expectedHead.Hash, crossSafe.Derived.Hash, msg) +} + +func (s *testSetup) verifyLogsHead(chainID 
eth.ChainID, expectedHead eth.BlockID, msg string) { + head, ok := s.chains[chainID].logDB.LatestSealedBlock() + require.True(s.t, ok) + require.Equal(s.t, expectedHead, head, msg) +} + +func (s *testSetup) sealBlocks(chainID eth.ChainID, blocks ...eth.L2BlockRef) { + for _, block := range blocks { + require.NoError(s.t, s.chains[chainID].logDB.SealBlock(block.ParentHash, block.ID(), block.Time)) + } +} + +func setupTestChain(t *testing.T) *testSetup { + chainID := eth.ChainID{1} + return setupTestChains(t, chainID) +} + +func createTestBlocks() (genesis, block1, block2A, block2B eth.L2BlockRef) { + l1Genesis := eth.BlockID{ + Hash: common.HexToHash("0xaaa0"), + Number: 0, + } + l1Block1 := eth.BlockID{ + Hash: common.HexToHash("0xaaa1"), + Number: 1, + } + l1Block2A := eth.BlockID{ + Hash: common.HexToHash("0xaaa2"), + Number: 2, + } + l1Block2B := eth.BlockID{ + Hash: common.HexToHash("0xbbb2"), + Number: 2, + } + + genesis = eth.L2BlockRef{ + Hash: common.HexToHash("0x1110"), + Number: 0, + ParentHash: common.Hash{}, + Time: 1000, + L1Origin: l1Genesis, + SequenceNumber: 0, + } + + block1 = eth.L2BlockRef{ + Hash: common.HexToHash("0x1111"), + Number: 1, + ParentHash: genesis.Hash, + Time: 1001, + L1Origin: l1Block1, + SequenceNumber: 1, + } + + block2A = eth.L2BlockRef{ + Hash: common.HexToHash("0x222a"), + Number: 2, + ParentHash: block1.Hash, + Time: 1002, + L1Origin: l1Block2A, + SequenceNumber: 2, + } + + block2B = eth.L2BlockRef{ + Hash: common.HexToHash("0x222b"), + Number: 2, + ParentHash: block1.Hash, + Time: 1002, + L1Origin: l1Block2B, + SequenceNumber: 2, + } + + return +} + +type mockEmitter struct { + events []event.Event +} + +func (m *mockEmitter) Emit(ev event.Event) { + m.events = append(m.events, ev) +} + +type mockSyncNode struct { + blocks map[uint64]eth.BlockRef +} + +func newMockSyncNode() *mockSyncNode { + return &mockSyncNode{ + blocks: make(map[uint64]eth.BlockRef), + } +} + +func (m *mockSyncNode) BlockRefByNumber(ctx context.Context, 
number uint64) (eth.BlockRef, error) { + return m.blocks[number], nil +} + +type stubMetrics struct { + entryCount int64 + entriesReadForSearch int64 + derivedEntryCount int64 +} + +func (s *stubMetrics) RecordDBEntryCount(kind string, count int64) { + s.entryCount = count +} + +func (s *stubMetrics) RecordDBSearchEntriesRead(count int64) { + s.entriesReadForSearch = count +} + +func (s *stubMetrics) RecordDBDerivedEntryCount(count int64) { + s.derivedEntryCount = count +} + +var _ logs.Metrics = (*stubMetrics)(nil) + +type mockL1Node struct { + blocks map[uint64]eth.BlockRef +} + +func newMockL1Node() *mockL1Node { + return &mockL1Node{ + blocks: make(map[uint64]eth.BlockRef), + } +} + +func (m *mockL1Node) L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) { + block, ok := m.blocks[number] + if !ok { + return eth.L1BlockRef{}, fmt.Errorf("block not found: %d", number) + } + return eth.L1BlockRef{ + Hash: block.Hash, + Number: block.Number, + Time: block.Time, + ParentHash: block.ParentHash, + }, nil +} diff --git a/op-supervisor/supervisor/backend/superevents/events.go b/op-supervisor/supervisor/backend/superevents/events.go index a76d3aed48e..95a57405212 100644 --- a/op-supervisor/supervisor/backend/superevents/events.go +++ b/op-supervisor/supervisor/backend/superevents/events.go @@ -137,6 +137,14 @@ func (ev InvalidateLocalSafeEvent) String() string { return "invalidate-local-safe" } +type RewindL1Event struct { + IncomingBlock eth.BlockID +} + +func (ev RewindL1Event) String() string { + return "rewind-l1" +} + type ReplaceBlockEvent struct { ChainID eth.ChainID Replacement types.BlockReplacement @@ -145,3 +153,11 @@ type ReplaceBlockEvent struct { func (ev ReplaceBlockEvent) String() string { return "replace-block-event" } + +type ChainRewoundEvent struct { + ChainID eth.ChainID +} + +func (ev ChainRewoundEvent) String() string { + return "chain-rewound" +} diff --git a/op-supervisor/supervisor/backend/syncnode/controller_test.go 
b/op-supervisor/supervisor/backend/syncnode/controller_test.go index 308a0c13c18..fafa7f2340d 100644 --- a/op-supervisor/supervisor/backend/syncnode/controller_test.go +++ b/op-supervisor/supervisor/backend/syncnode/controller_test.go @@ -97,6 +97,10 @@ func (m *mockBackend) LocalSafe(ctx context.Context, chainID eth.ChainID) (pair return types.DerivedIDPair{}, nil } +func (m *mockBackend) CrossSafe(ctx context.Context, chainID eth.ChainID) (types.DerivedIDPair, error) { + return types.DerivedIDPair{}, nil +} + func (m *mockBackend) LocalUnsafe(ctx context.Context, chainID eth.ChainID) (eth.BlockID, error) { return eth.BlockID{}, nil } diff --git a/op-supervisor/supervisor/backend/syncnode/iface.go b/op-supervisor/supervisor/backend/syncnode/iface.go index 4bed9b37bdd..10d77e00771 100644 --- a/op-supervisor/supervisor/backend/syncnode/iface.go +++ b/op-supervisor/supervisor/backend/syncnode/iface.go @@ -27,6 +27,7 @@ type SyncSource interface { ChainID(ctx context.Context) (eth.ChainID, error) OutputV0AtTimestamp(ctx context.Context, timestamp uint64) (*eth.OutputV0, error) PendingOutputV0AtTimestamp(ctx context.Context, timestamp uint64) (*eth.OutputV0, error) + L2BlockRefByTimestamp(ctx context.Context, timestamp uint64) (eth.L2BlockRef, error) // String identifies the sync source String() string } diff --git a/op-supervisor/supervisor/backend/syncnode/node.go b/op-supervisor/supervisor/backend/syncnode/node.go index fd4686e987d..32bd79b731f 100644 --- a/op-supervisor/supervisor/backend/syncnode/node.go +++ b/op-supervisor/supervisor/backend/syncnode/node.go @@ -25,6 +25,7 @@ import ( type backend interface { LocalSafe(ctx context.Context, chainID eth.ChainID) (pair types.DerivedIDPair, err error) LocalUnsafe(ctx context.Context, chainID eth.ChainID) (eth.BlockID, error) + CrossSafe(ctx context.Context, chainID eth.ChainID) (pair types.DerivedIDPair, err error) SafeDerivedAt(ctx context.Context, chainID eth.ChainID, derivedFrom eth.BlockID) (derived eth.BlockID, 
err error) Finalized(ctx context.Context, chainID eth.ChainID) (eth.BlockID, error) L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) @@ -109,7 +110,11 @@ func (m *ManagedNode) OnEvent(ev event.Event) bool { return false } m.resetSignal(x.Err, x.L1Ref) - // TODO: watch for reorg events from DB. Send a reset signal to op-node if needed + case superevents.ChainRewoundEvent: + if x.ChainID != m.chainID { + return false + } + m.sendReset() default: return false } @@ -342,6 +347,36 @@ func (m *ManagedNode) resetSignal(errSignal error, l1Ref eth.BlockRef) { } } +func (m *ManagedNode) sendReset() { + ctx, cancel := context.WithTimeout(m.ctx, internalTimeout) + defer cancel() + + u, err := m.backend.LocalUnsafe(ctx, m.chainID) + if err != nil { + m.log.Warn("Failed to retrieve local-unsafe", "err", err) + return + } + s, err := m.backend.CrossSafe(ctx, m.chainID) + if err != nil { + m.log.Warn("Failed to retrieve cross-safe", "err", err) + return + } + f, err := m.backend.Finalized(ctx, m.chainID) + if err != nil { + if errors.Is(err, types.ErrFuture) { + f = eth.BlockID{Number: 0} + } else { + m.log.Warn("Failed to retrieve finalized", "err", err) + return + } + } + + if err := m.Node.Reset(ctx, u, s.Derived, f); err != nil { + m.log.Warn("Node failed to reset", "err", err) + return + } +} + // resolveConflict attempts to reset the node to a valid state when a conflict is detected. // It first tries using the latest safe block, and if that fails, walks back block by block // until it finds a common ancestor or reaches the finalized block. 
diff --git a/op-supervisor/supervisor/backend/syncnode/rpc.go b/op-supervisor/supervisor/backend/syncnode/rpc.go index d7d231c782d..4bfcb52d753 100644 --- a/op-supervisor/supervisor/backend/syncnode/rpc.go +++ b/op-supervisor/supervisor/backend/syncnode/rpc.go @@ -81,6 +81,12 @@ func (rs *RPCSyncNode) PendingOutputV0AtTimestamp(ctx context.Context, timestamp return out, err } +func (rs *RPCSyncNode) L2BlockRefByTimestamp(ctx context.Context, timestamp uint64) (eth.L2BlockRef, error) { + var out eth.L2BlockRef + err := rs.cl.CallContext(ctx, &out, "interop_l2BlockRefByTimestamp", timestamp) + return out, err +} + func (rs *RPCSyncNode) String() string { return rs.name } diff --git a/op-wheel/cheat/cheat.go b/op-wheel/cheat/cheat.go index 1cd031d4359..ae43bd56243 100644 --- a/op-wheel/cheat/cheat.go +++ b/op-wheel/cheat/cheat.go @@ -101,8 +101,9 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error { return ch.Close() } + isCancun := ch.Blockchain.Config().IsCancun(preHeader.Number, preHeader.Time) // commit the changes, and then update the state-root - stateRoot, err := state.Commit(preHeader.Number.Uint64()+1, true) + stateRoot, err := state.Commit(preHeader.Number.Uint64()+1, true, isCancun) if err != nil { _ = ch.Close() return fmt.Errorf("failed to commit state change: %w", err) @@ -324,7 +325,8 @@ func StoragePatch(patch io.Reader, address common.Address) HeadFn { } i += 1 if i%1000 == 0 { // for every 1000 values, commit to disk - if _, err := headState.Commit(head.Number.Uint64(), true); err != nil { + // warning: if the account is empty, the storage change will not persist. 
+ if _, err := headState.Commit(head.Number.Uint64(), true, false); err != nil { return fmt.Errorf("failed to commit state to disk after patching %d entries: %w", i, err) } } diff --git a/ops/docker/Dockerfile.packages b/ops/docker/Dockerfile.packages deleted file mode 100644 index 4ea958c2184..00000000000 --- a/ops/docker/Dockerfile.packages +++ /dev/null @@ -1,35 +0,0 @@ -# We need to specify the platforms below, otherwise platforms other than -# linux/amd64 will be forced to rebuild the contracts every time this -# image is used. - -# This Dockerfile builds all the dependencies needed by the smart-contracts, excluding Go and Python. - -FROM --platform=linux/amd64 us-docker.pkg.dev/oplabs-tools-artifacts/images/ci-builder:latest AS foundry - -FROM --platform=linux/amd64 debian:bookworm-20240812-slim AS base - -# Base: install deps -RUN apt-get update && apt-get install -y \ - curl \ - jq \ - ca-certificates \ - git \ - make \ - bash \ - --no-install-recommends - -COPY --from=foundry /usr/local/bin/just /usr/local/bin/just -COPY --from=foundry /usr/local/bin/forge /usr/local/bin/forge -COPY --from=foundry /usr/local/bin/cast /usr/local/bin/cast - -WORKDIR /opt/optimism - -COPY ./mise.toml ./mise.toml -COPY ./packages ./packages -COPY .git/ ./.git -COPY .gitmodules ./.gitmodules - -RUN git submodule update --init --recursive \ - && cd packages/contracts-bedrock \ - && just forge-build \ - && echo $(git rev-parse HEAD) > .gitcommit diff --git a/ops/docker/ci-builder/Dockerfile b/ops/docker/ci-builder/Dockerfile deleted file mode 100644 index 1f47a4950ee..00000000000 --- a/ops/docker/ci-builder/Dockerfile +++ /dev/null @@ -1,93 +0,0 @@ -############################################################################### -# BUILDX # -############################################################################### - -FROM --platform=linux/amd64 docker AS buildx -COPY --from=docker/buildx-bin /buildx /usr/libexec/docker/cli-plugins/docker-buildx -RUN docker buildx version - - 
-############################################################################### -# CI BUILDER (BASE) # -############################################################################### - -FROM --platform=linux/amd64 debian:bullseye-slim AS base-builder - -# Use bash as the shell -SHELL ["/bin/bash", "-c"] -ENV SHELL=/bin/bash -ENV BASH=/bin/bash - -# Copy mise configuration -COPY ./mise.toml ./mise.toml - -# Set up mise environment -ENV PATH="/root/.local/share/mise/shims:$PATH" -ENV PATH="/root/.local/bin:${PATH}" - -# Set up cargo environment -ENV PATH="/root/.cargo/bin:${PATH}" - -# Install dependencies -# We do this in one mega RUN command to avoid blowing up the size of the image -ENV DEBIAN_FRONTEND=noninteractive -RUN /bin/sh -c set -eux; \ - apt-get update; \ - apt-get install -y --no-install-recommends bash curl openssh-client git build-essential ca-certificates gnupg binutils-mips-linux-gnu clang libffi-dev; \ - mkdir -p /etc/apt/keyrings; \ - curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg; \ - chmod a+r /etc/apt/keyrings/docker.gpg; \ - echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(. 
/etc/os-release && echo "$VERSION_CODENAME") stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null; \ - apt-get update; \ - apt-get install -y docker-ce-cli; \ - curl https://mise.run | sh; \ - mise trust ./mise.toml; \ - mise install; \ - curl -fLSs https://raw.githubusercontent.com/CircleCI-Public/circleci-cli/master/install.sh | bash; \ - pip install capstone pyelftools; \ - go env -w GOMODCACHE=/go/pkg/mod; \ - go env -w GOCACHE=/root/.cache/go-build; \ - ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt; \ - apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false; \ - go clean -cache -modcache -testcache; \ - rm -rf /var/lib/apt/lists/*; \ - rm -rf /root/.cache/pip; \ - rm -rf /root/.cache/uv; \ - rm -rf /root/.rustup; - -# Install Solidity versions -RUN echo "installing Solidity versions" && \ - svm install 0.8.25 && \ - svm install 0.8.19 && \ - svm install 0.8.15 - -# Install Codecov uploader -RUN echo "downloading and verifying Codecov uploader" && \ - curl https://keybase.io/codecovsecurity/pgp_keys.asc | gpg --no-default-keyring --keyring trustedkeys.gpg --import && \ - curl -Os "https://uploader.codecov.io/latest/linux/codecov" && \ - curl -Os "https://uploader.codecov.io/latest/linux/codecov.SHA256SUM" && \ - curl -Os "https://uploader.codecov.io/latest/linux/codecov.SHA256SUM.sig" && \ - gpgv codecov.SHA256SUM.sig codecov.SHA256SUM && \ - shasum -a 256 -c codecov.SHA256SUM || sha256sum -c codecov.SHA256SUM && \ - cp codecov /usr/local/bin/codecov && \ - chmod +x /usr/local/bin/codecov && \ - rm codecov - -# Copy docker buildx -COPY --from=buildx /usr/libexec/docker/cli-plugins/docker-buildx /usr/libexec/docker/cli-plugins/docker-buildx - -# Set up entrypoint -ENTRYPOINT ["/bin/bash", "-c"] - - -############################################################################### -# CI BUILDER (RUST) # -############################################################################### - -FROM base-builder AS rust-builder - -# 
Install clang & lld -RUN apt-get update && apt-get install -y clang lld - -# Install nightly toolchain -RUN rustup update nightly diff --git a/ops/docker/ci-builder/Dockerfile.dockerignore b/ops/docker/ci-builder/Dockerfile.dockerignore deleted file mode 100644 index 4f44e253194..00000000000 --- a/ops/docker/ci-builder/Dockerfile.dockerignore +++ /dev/null @@ -1,3 +0,0 @@ -* -!/.nvmrc -!/mise.toml diff --git a/ops/scripts/ci-docker-tag-op-stack-release.sh b/ops/scripts/ci-docker-tag-op-stack-release.sh index 1de86b749d3..c8dfde1fae2 100755 --- a/ops/scripts/ci-docker-tag-op-stack-release.sh +++ b/ops/scripts/ci-docker-tag-op-stack-release.sh @@ -6,7 +6,7 @@ DOCKER_REPO=$1 GIT_TAG=$2 GIT_SHA=$3 -IMAGE_NAME=$(echo "$GIT_TAG" | grep -Eow '^(ci-builder(-rust)?|da-server|proofs-tools|holocene-deployer|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)' || true) +IMAGE_NAME=$(echo "$GIT_TAG" | grep -Eow '^(da-server|proofs-tools|holocene-deployer|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)' || true) if [ -z "$IMAGE_NAME" ]; then echo "image name could not be parsed from git tag '$GIT_TAG'" exit 1 diff --git a/packages/contracts-bedrock/README.md b/packages/contracts-bedrock/README.md index 52598632cb6..915fe20dc6c 100644 --- a/packages/contracts-bedrock/README.md +++ b/packages/contracts-bedrock/README.md @@ -84,6 +84,15 @@ OP Stack smart contracts are designed to utilize a single, consistent Solidity v refer to [SOLIDITY_UPGRADES.md](./meta/SOLIDITY_UPGRADES.md) to understand the process for updating to newer Solidity versions. +### Frozen Code + +From time to time we need to ensure that certain files remain frozen, as they may be under audit or +a large PR is in the works and we wish to avoid a large rebase. In order to enforce this, +a hardcoded list of contracts is stored in `./scripts/checks/check-frozen-files.sh`. Any change +which affects the resulting init or source code of that contract will cause a failure in CI. 
+ +In order to remove a file from the freeze it must be removed from the check file. + ## Deployment The smart contracts are deployed using `foundry`. The `DEPLOYMENT_OUTFILE` env var will determine the filepath that the diff --git a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol b/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol index 4ce5c0fbf62..fca2b7df456 100644 --- a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol +++ b/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol @@ -105,14 +105,13 @@ interface IOPContractsManager { address disputeGameFactoryImpl; address anchorStateRegistryImpl; address delayedWETHImpl; - address mips64Impl; + address mipsImpl; } /// @notice The input required to identify a chain for upgrading. struct OpChainConfig { ISystemConfig systemConfigProxy; IProxyAdmin proxyAdmin; - Claim absolutePrestate; } struct AddGameInput { @@ -210,8 +209,6 @@ interface IOPContractsManager { error SuperchainProxyAdminMismatch(); - error PrestateNotSet(); - // -------- Methods -------- function __constructor__( diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 8d8bdeb6d4f..955b8cb4c8e 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -23,6 +23,10 @@ forge-build: forge-build-dev: FOUNDRY_PROFILE=lite forge build +# Builds source contracts only. +build-source: + forge build --skip "/**/test/**" --skip "/**/scripts/**" + # Builds the contracts. build: lint-fix-no-fail forge-build interfaces-check-no-build @@ -176,23 +180,24 @@ kontrol-summary-fp: kontrol-summary-full: kontrol-summary kontrol-summary-fp # Generates ABI snapshots for contracts. -snapshots-abi-storage: +snapshots-abi-storage-no-build: go run ./scripts/autogen/generate-snapshots . +# Generates ABI snapshots for contracts. 
+snapshots-abi-storage: build-source snapshots-abi-storage-no-build + # Updates the snapshots/semver-lock.json file without building contracts. semver-lock-no-build: go run scripts/autogen/generate-semver-lock/main.go # Updates the snapshots/semver-lock.json file. -semver-lock: build semver-lock-no-build +semver-lock: build-source semver-lock-no-build -# Generates core snapshots without building contracts. Currently just an alias for -# snapshots-abi-storage because we no longer run Kontrol snapshots here. Run -# kontrol-summary-full to build the Kontrol summaries if necessary. -snapshots-no-build: snapshots-abi-storage +# Generates core snapshots without building contracts. +snapshots-no-build: snapshots-abi-storage-no-build semver-lock-no-build gas-snapshot-no-build # Builds contracts and then generates core snapshots. -snapshots: build snapshots-no-build +snapshots: build-source snapshots-no-build ######################################################## @@ -281,13 +286,16 @@ semgrep: semgrep-test: cd ../../ && semgrep scan --test --config .semgrep/rules/ .semgrep/tests/ +# Checks that the frozen code has not been modified. +check-frozen-code: + ./scripts/checks/check-frozen-files.sh + # Runs all checks. check: - @just gas-snapshot-check-no-build \ - semgrep-test-validity-check \ - unused-imports-check-no-build \ - snapshots-check-no-build \ + @just semgrep-test-validity-check \ lint-check \ + snapshots-check-no-build \ + unused-imports-check-no-build \ semver-diff-check-no-build \ validate-deploy-configs \ validate-spacers-no-build \ @@ -303,7 +311,12 @@ pre-pr: clean pre-pr-no-build # Builds, lints, and runs all checks. Sometimes a bad cache causes issues, in which case the above # `pre-pr` is preferred. But in most cases this will be sufficient and much faster then a full build. 
-pre-pr-no-build: build-go-ffi build lint gas-snapshot-no-build snapshots-no-build semver-lock check +# Steps: +# - Build (dev mode) to confirm that contracts compile +# - Lint contracts +# - Generate snapshots +# - Run all checks to confirm everything is good +pre-pr-no-build: build-dev lint snapshots-no-build check # Fixes linting errors. lint-fix: diff --git a/packages/contracts-bedrock/scripts/L2Genesis.s.sol b/packages/contracts-bedrock/scripts/L2Genesis.s.sol index 0c595ffa4df..cb07e4c24e4 100644 --- a/packages/contracts-bedrock/scripts/L2Genesis.s.sol +++ b/packages/contracts-bedrock/scripts/L2Genesis.s.sol @@ -277,8 +277,6 @@ contract L2Genesis is Deployer { setL2ToL2CrossDomainMessenger(); // 23 setSuperchainWETH(); // 24 setETHLiquidity(); // 25 - setOptimismSuperchainERC20Factory(); // 26 - setOptimismSuperchainERC20Beacon(); // 27 setSuperchainTokenBridge(); // 28 } } @@ -312,15 +310,7 @@ contract L2Genesis is Deployer { /// @notice This predeploy is following the safety invariant #1. 
function setL2StandardBridge(address payable _l1StandardBridgeProxy) public { - address impl; - if (cfg.useInterop()) { - string memory cname = "L2StandardBridgeInterop"; - impl = Predeploys.predeployToCodeNamespace(Predeploys.L2_STANDARD_BRIDGE); - console.log("Setting %s implementation at: %s", cname, impl); - vm.etch(impl, vm.getDeployedCode(string.concat(cname, ".sol:", cname))); - } else { - impl = _setImplementationCode(Predeploys.L2_STANDARD_BRIDGE); - } + address impl = _setImplementationCode(Predeploys.L2_STANDARD_BRIDGE); IL2StandardBridge(payable(impl)).initialize({ _otherBridge: IStandardBridge(payable(address(0))) }); diff --git a/packages/contracts-bedrock/scripts/checks/check-frozen-files.sh b/packages/contracts-bedrock/scripts/checks/check-frozen-files.sh new file mode 100755 index 00000000000..e9c53697efe --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/check-frozen-files.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Grab the directory of the contracts-bedrock package. +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +# Load semver-utils. +# shellcheck source=/dev/null +source "$SCRIPT_DIR/utils/semver-utils.sh" + +# Path to semver-lock.json. +SEMVER_LOCK="snapshots/semver-lock.json" + +# Create a temporary directory. +temp_dir=$(mktemp -d) +trap 'rm -rf "$temp_dir"' EXIT + +# Exit early if semver-lock.json has not changed. +if ! { git diff origin/develop...HEAD --name-only; git diff --name-only; git diff --cached --name-only; } | grep -q "$SEMVER_LOCK"; then + echo "No changes detected in semver-lock.json" + exit 0 +fi + +# Get the upstream semver-lock.json. +if ! git show origin/develop:packages/contracts-bedrock/snapshots/semver-lock.json > "$temp_dir/upstream_semver_lock.json" 2>/dev/null; then + echo "❌ Error: Could not find semver-lock.json in the snapshots/ directory of develop branch" + exit 1 +fi + +# Copy the local semver-lock.json. 
+cp "$SEMVER_LOCK" "$temp_dir/local_semver_lock.json" + +# Get the changed contracts. +changed_contracts=$(jq -r ' + def changes: + to_entries as $local + | input as $upstream + | $local | map( + select( + .key as $key + | .value != $upstream[$key] + ) + ) | map(.key); + changes[] +' "$temp_dir/local_semver_lock.json" "$temp_dir/upstream_semver_lock.json") + +FROZEN_FILES=( + "src/L1/DataAvailabilityChallenge.sol" + "src/L1/L1CrossDomainMessenger.sol" + "src/L1/L1ERC721Bridge.sol" + "src/L1/L1StandardBridge.sol" + "src/L1/OptimismPortal2.sol" + "src/L1/ProtocolVersions.sol" + "src/L1/SuperchainConfig.sol" + "src/L1/SystemConfig.sol" + "src/dispute/AnchorStateRegistry.sol" + "src/dispute/DelayedWETH.sol" + "src/dispute/DisputeGameFactory.sol" + "src/dispute/FaultDisputeGame.sol" + "src/dispute/PermissionedDisputeGame.sol" + "src/cannon/MIPS.sol" + "src/cannon/MIPS2.sol" +# TODO(#14116): Add MIPS64 back when development is finished +# "src/cannon/MIPS64.sol" + "src/cannon/PreimageOracle.sol" +) + +MATCHED_FILES=() +# Check each changed contract against protected patterns +for contract in $changed_contracts; do + for frozen_file in "${FROZEN_FILES[@]}"; do + if [[ "$contract" == "$frozen_file" ]]; then + MATCHED_FILES+=("$contract") + fi + done +done + + +if [ ${#MATCHED_FILES[@]} -gt 0 ]; then + echo "❌ Error: The following files should not be modified:" + printf ' - %s\n' "${MATCHED_FILES[@]}" + echo "In order to make changes to these contracts, they must be removed from the FROZEN_FILES array in check-frozen-files.sh" + echo "The code freeze is expected to be lifted no later than 2025-02-20." 
+ exit 1 +fi + +echo "✅ No changes detected in frozen files" +exit 0 diff --git a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol index 2d4c918e230..27aa57a0e4d 100644 --- a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol +++ b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol @@ -505,7 +505,7 @@ library ChainAssertions { require(impls.l1StandardBridgeImpl == _impls.L1StandardBridge, "CHECK-OPCM-100"); require(impls.disputeGameFactoryImpl == _impls.DisputeGameFactory, "CHECK-OPCM-110"); require(impls.delayedWETHImpl == _impls.DelayedWETH, "CHECK-OPCM-120"); - require(impls.mips64Impl == address(_mips), "CHECK-OPCM-130"); + require(impls.mipsImpl == address(_mips), "CHECK-OPCM-130"); require(impls.superchainConfigImpl == _impls.SuperchainConfig, "CHECK-OPCM-140"); require(impls.protocolVersionsImpl == _impls.ProtocolVersions, "CHECK-OPCM-150"); diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index ab92c623bc4..9d844e7470a 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -743,14 +743,14 @@ contract Deploy is Deployer { ); } - /// @notice Loads the multithreaded mips absolute prestate from the prestate-proof-mt for devnets otherwise + /// @notice Loads the multithreaded mips absolute prestate from the prestate-proof-mt64 for devnets otherwise /// from the config. 
function _loadDevnetMtMipsAbsolutePrestate() internal returns (Claim mipsAbsolutePrestate_) { // Fetch the absolute prestate dump - string memory filePath = string.concat(vm.projectRoot(), "/../../op-program/bin/prestate-proof-mt.json"); + string memory filePath = string.concat(vm.projectRoot(), "/../../op-program/bin/prestate-proof-mt64.json"); if (bytes(Process.bash(string.concat("[[ -f ", filePath, " ]] && echo \"present\""))).length == 0) { revert( - "Deploy: MT-Cannon prestate dump not found, generate it with `make cannon-prestate-mt` in the monorepo root" + "Deploy: MT-Cannon prestate dump not found, generate it with `make cannon-prestate-mt64` in the monorepo root" ); } mipsAbsolutePrestate_ = @@ -888,6 +888,8 @@ contract Deploy is Deployer { gameTypeString = "Alphabet"; } else if (rawGameType == GameTypes.OP_SUCCINCT.raw()) { gameTypeString = "OP Succinct"; + } else if (rawGameType == GameTypes.KAILUA.raw()) { + gameTypeString = "Kailua"; } else { gameTypeString = "Unknown"; } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol index 6399115fad7..b0cf2eeb635 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol @@ -290,18 +290,20 @@ contract DeployDisputeGame is Script { IPermissionedDisputeGame impl; if (LibString.eq(_dgi.gameKind(), "FaultDisputeGame")) { impl = IPermissionedDisputeGame( - DeployUtils.create1({ + DeployUtils.createDeterministic({ _name: "FaultDisputeGame", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IFaultDisputeGame.__constructor__, (args))) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IFaultDisputeGame.__constructor__, (args))), + _salt: DeployUtils.DEFAULT_SALT }) ); } else { impl = IPermissionedDisputeGame( - DeployUtils.create1({ + DeployUtils.createDeterministic({ _name: "PermissionedDisputeGame", _args: 
DeployUtils.encodeConstructor( abi.encodeCall(IPermissionedDisputeGame.__constructor__, (args, _dgi.proposer(), _dgi.challenger())) - ) + ), + _salt: DeployUtils.DEFAULT_SALT }) ); } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol index e641a8e8f48..42874597e04 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol @@ -5,10 +5,13 @@ import { Script } from "forge-std/Script.sol"; import { LibString } from "@solady/utils/LibString.sol"; +// Libraries +import { Chains } from "scripts/libraries/Chains.sol"; + +// Interfaces import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; - import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; import { IMIPS } from "interfaces/cannon/IMIPS.sol"; @@ -490,7 +493,7 @@ contract DeployImplementations is Script { disputeGameFactoryImpl: address(_dio.disputeGameFactoryImpl()), anchorStateRegistryImpl: address(_dio.anchorStateRegistryImpl()), delayedWETHImpl: address(_dio.delayedWETHImpl()), - mips64Impl: address(_dio.mipsSingleton()) + mipsImpl: address(_dio.mipsSingleton()) }); vm.broadcast(msg.sender); @@ -751,6 +754,14 @@ contract DeployImplementations is Script { function deployMipsSingleton(DeployImplementationsInput _dii, DeployImplementationsOutput _dio) public virtual { uint256 mipsVersion = _dii.mipsVersion(); IPreimageOracle preimageOracle = IPreimageOracle(address(_dio.preimageOracleSingleton())); + + // We want to ensure that the OPCM for upgrade 13 is deployed with Mips32 on production networks. 
+ if (mipsVersion != 1) { + if (block.chainid == Chains.Mainnet || block.chainid == Chains.Sepolia) { + revert("DeployImplementations: Only Mips32 should be deployed on Mainnet or Sepolia"); + } + } + vm.broadcast(msg.sender); IMIPS singleton = IMIPS( DeployUtils.createDeterministic({ diff --git a/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol index 4db9b615801..ec5fad69b94 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol @@ -82,9 +82,10 @@ contract DeployMIPS is Script { IPreimageOracle preimageOracle = IPreimageOracle(_mi.preimageOracle()); vm.broadcast(msg.sender); singleton = IMIPS( - DeployUtils.create1({ + DeployUtils.createDeterministic({ _name: mipsVersion == 1 ? "MIPS" : "MIPS64", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS.__constructor__, (preimageOracle))) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS.__constructor__, (preimageOracle))), + _salt: DeployUtils.DEFAULT_SALT }) ); diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol index 60c696dde41..080e56bca7d 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOPCM.s.sol @@ -39,7 +39,7 @@ contract DeployOPCMInput is BaseDeployIO { address internal _disputeGameFactoryImpl; address internal _anchorStateRegistryImpl; address internal _delayedWETHImpl; - address internal _mips64Impl; + address internal _mipsImpl; // Setter for address type function set(bytes4 _sel, address _addr) public { @@ -67,7 +67,7 @@ contract DeployOPCMInput is BaseDeployIO { else if (_sel == this.disputeGameFactoryImpl.selector) _disputeGameFactoryImpl = _addr; else if (_sel == this.anchorStateRegistryImpl.selector) _anchorStateRegistryImpl = _addr; else if (_sel == 
this.delayedWETHImpl.selector) _delayedWETHImpl = _addr; - else if (_sel == this.mips64Impl.selector) _mips64Impl = _addr; + else if (_sel == this.mipsImpl.selector) _mipsImpl = _addr; else revert("DeployOPCMInput: unknown selector"); } @@ -194,9 +194,9 @@ contract DeployOPCMInput is BaseDeployIO { return _delayedWETHImpl; } - function mips64Impl() public view returns (address) { - require(_mips64Impl != address(0), "DeployOPCMInput: not set"); - return _mips64Impl; + function mipsImpl() public view returns (address) { + require(_mipsImpl != address(0), "DeployOPCMInput: not set"); + return _mipsImpl; } } @@ -242,7 +242,7 @@ contract DeployOPCM is Script { disputeGameFactoryImpl: address(_doi.disputeGameFactoryImpl()), anchorStateRegistryImpl: address(_doi.anchorStateRegistryImpl()), delayedWETHImpl: address(_doi.delayedWETHImpl()), - mips64Impl: address(_doi.mips64Impl()) + mipsImpl: address(_doi.mipsImpl()) }); IOPContractsManager opcm_ = deployOPCM( @@ -324,7 +324,7 @@ contract DeployOPCM is Script { require(implementations.disputeGameFactoryImpl == _doi.disputeGameFactoryImpl(), "OPCMI-180"); require(implementations.anchorStateRegistryImpl == _doi.anchorStateRegistryImpl(), "OPCMI-190"); require(implementations.delayedWETHImpl == _doi.delayedWETHImpl(), "OPCMI-200"); - require(implementations.mips64Impl == _doi.mips64Impl(), "OPCMI-210"); + require(implementations.mipsImpl == _doi.mipsImpl(), "OPCMI-210"); } function etchIOContracts() public returns (DeployOPCMInput doi_, DeployOPCMOutput doo_) { diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol index 24fd0ffcb26..2112d614251 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol @@ -468,8 +468,8 @@ contract DeployOPChain is Script { ); IOPContractsManager opcm = _doi.opcm(); - address mips64Impl = opcm.implementations().mips64Impl; - 
require(game.vm() == IBigStepper(mips64Impl), "DPG-30"); + address mipsImpl = opcm.implementations().mipsImpl; + require(game.vm() == IBigStepper(mipsImpl), "DPG-30"); require(address(game.weth()) == address(_doo.delayedWETHPermissionedGameProxy()), "DPG-40"); require(address(game.anchorStateRegistry()) == address(_doo.anchorStateRegistryProxy()), "DPG-50"); diff --git a/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol b/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol index 3e0c0c1a8e5..211281b506d 100644 --- a/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol @@ -143,7 +143,7 @@ contract ReadImplementationAddresses is Script { vm.prank(address(0)); _rio.set(_rio.l1StandardBridge.selector, l1SBImpl); - address mipsLogic = _rii.opcm().implementations().mips64Impl; + address mipsLogic = _rii.opcm().implementations().mipsImpl; _rio.set(_rio.mipsSingleton.selector, mipsLogic); address delayedWETH = _rii.opcm().implementations().delayedWETHImpl; diff --git a/packages/contracts-bedrock/scripts/deploy/UpgradeOPChain.s.sol b/packages/contracts-bedrock/scripts/deploy/UpgradeOPChain.s.sol new file mode 100644 index 00000000000..d238b55edc2 --- /dev/null +++ b/packages/contracts-bedrock/scripts/deploy/UpgradeOPChain.s.sol @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { Script } from "forge-std/Script.sol"; +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; +import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; + +contract UpgradeOPChainInput is BaseDeployIO { + address internal _prank; + OPContractsManager internal _opcm; + bytes _opChainConfigs; + + // Setter for OPContractsManager type + function set(bytes4 _sel, address _value) public { + require(address(_value) != address(0), "UpgradeOPCMInput: cannot set zero address"); + + if (_sel == 
this.prank.selector) _prank = _value; + else if (_sel == this.opcm.selector) _opcm = OPContractsManager(_value); + else revert("UpgradeOPCMInput: unknown selector"); + } + + function set(bytes4 _sel, OPContractsManager.OpChainConfig[] memory _value) public { + require(_value.length > 0, "UpgradeOPCMInput: cannot set empty array"); + + if (_sel == this.opChainConfigs.selector) _opChainConfigs = abi.encode(_value); + else revert("UpgradeOPCMInput: unknown selector"); + } + + function prank() public view returns (address) { + require(address(_prank) != address(0), "UpgradeOPCMInput: prank not set"); + return _prank; + } + + function opcm() public view returns (OPContractsManager) { + require(address(_opcm) != address(0), "UpgradeOPCMInput: not set"); + return _opcm; + } + + function opChainConfigs() public view returns (bytes memory) { + require(_opChainConfigs.length > 0, "UpgradeOPCMInput: not set"); + return _opChainConfigs; + } +} + +contract UpgradeOPChain is Script { + function run(UpgradeOPChainInput _uoci) external { + OPContractsManager opcm = _uoci.opcm(); + OPContractsManager.OpChainConfig[] memory opChainConfigs = + abi.decode(_uoci.opChainConfigs(), (OPContractsManager.OpChainConfig[])); + + // Etch DummyCaller contract. This contract is used to mimic the contract that is used + // as the source of the delegatecall to the OPCM. In practice this will be the governance + // 2/2 or similar. + address prank = _uoci.prank(); + bytes memory code = vm.getDeployedCode("UpgradeOPChain.s.sol:DummyCaller"); + vm.etch(prank, code); + vm.store(prank, bytes32(0), bytes32(uint256(uint160(address(opcm))))); + vm.label(prank, "DummyCaller"); + + // Call into the DummyCaller. This will perform the delegatecall under the hood and + // return the result. 
+ vm.broadcast(msg.sender); + (bool success,) = DummyCaller(prank).upgrade(opChainConfigs); + require(success, "UpgradeChain: upgrade failed"); + } +} + +contract DummyCaller { + address internal _opcmAddr; + + function upgrade(OPContractsManager.OpChainConfig[] memory _opChainConfigs) external returns (bool, bytes memory) { + bytes memory data = abi.encodeCall(DummyCaller.upgrade, _opChainConfigs); + (bool success, bytes memory result) = _opcmAddr.delegatecall(data); + return (success, result); + } +} diff --git a/packages/contracts-bedrock/snapshots/.gas-snapshot b/packages/contracts-bedrock/snapshots/.gas-snapshot index b65a63ca795..febca5a38f4 100644 --- a/packages/contracts-bedrock/snapshots/.gas-snapshot +++ b/packages/contracts-bedrock/snapshots/.gas-snapshot @@ -1,13 +1,13 @@ -GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7567) -GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5567) +GasBenchMark_L1BlockInterop_DepositsComplete:test_depositsComplete_benchmark() (gas: 7589) +GasBenchMark_L1BlockInterop_DepositsComplete_Warm:test_depositsComplete_benchmark() (gas: 5589) GasBenchMark_L1BlockInterop_SetValuesInterop:test_setL1BlockValuesInterop_benchmark() (gas: 175700) GasBenchMark_L1BlockInterop_SetValuesInterop_Warm:test_setL1BlockValuesInterop_benchmark() (gas: 5144) -GasBenchMark_L1Block_SetValuesEcotone:test_setL1BlockValuesEcotone_benchmark() (gas: 158487) -GasBenchMark_L1Block_SetValuesEcotone_Warm:test_setL1BlockValuesEcotone_benchmark() (gas: 7597) +GasBenchMark_L1Block_SetValuesEcotone:test_setL1BlockValuesEcotone_benchmark() (gas: 158509) +GasBenchMark_L1Block_SetValuesEcotone_Warm:test_setL1BlockValuesEcotone_benchmark() (gas: 7619) GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 356475) GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2954682) -GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 
551570) +GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 551615) GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4063763) -GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 450277) +GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 450255) GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3496176) -GasBenchMark_L1StandardBridge_Finalize:test_finalizeETHWithdrawal_benchmark() (gas: 59840) \ No newline at end of file +GasBenchMark_L1StandardBridge_Finalize:test_finalizeETHWithdrawal_benchmark() (gas: 59795) \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 7c1333739ce..1356470d607 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -132,7 +132,7 @@ }, { "internalType": "address", - "name": "mips64Impl", + "name": "mipsImpl", "type": "address" } ], @@ -579,7 +579,7 @@ }, { "internalType": "address", - "name": "mips64Impl", + "name": "mipsImpl", "type": "address" } ], @@ -682,11 +682,6 @@ "internalType": "contract IProxyAdmin", "name": "proxyAdmin", "type": "address" - }, - { - "internalType": "Claim", - "name": "absolutePrestate", - "type": "bytes32" } ], "internalType": "struct OPContractsManager.OpChainConfig[]", @@ -868,11 +863,6 @@ "name": "OnlyUpgradeController", "type": "error" }, - { - "inputs": [], - "name": "PrestateNotSet", - "type": "error" - }, { "inputs": [], "name": "ReservedBitsSet", diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index a4a13362b2a..b9c099add7a 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -16,8 +16,8 @@ "sourceCodeHash": 
"0xf9ba98657dc235355146e381b654fe3ed766feb7cd87636ec0c9d4c6dd3e1973" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0xba9f472eded9b88b53b2c6d46c9d4e9ebf6d348fcc364cae1836f8f5f5717d87", - "sourceCodeHash": "0x7138ea0dbc9dfc4fbdc4fa867fa2bea7be5a2c592ce7391282083ef068cb332e" + "initCodeHash": "0xee8025c5f74ce7d9da5875b5f5b97106af0a307f366cc9559ec27174b3e3d0bc", + "sourceCodeHash": "0x1c37bb41a46a1c0fe6c8cef88037ecc6700675c4cfa8d673349f026ffd919526" }, "src/L1/OptimismPortal2.sol": { "initCodeHash": "0xfaa5f7f911871a00e43024bcf1344cfe846c3cb1aec83d898f5acdd98e3ae223", @@ -37,11 +37,11 @@ }, "src/L1/SuperchainConfig.sol": { "initCodeHash": "0xfce1f0ba88263d9761e458331cae9540278ebae15c1b319c7f941bedc4d85d46", - "sourceCodeHash": "0xa98dfaa25f6594f702ca868a3bf54c6b00694d6a3d5601c04ac5db788d2b6e3c" + "sourceCodeHash": "0x4137b5c7cc4d5a76d9ba840032ad2725f7ea117daa0a9873154a6de43fec2a98" }, "src/L1/SuperchainConfigInterop.sol": { "initCodeHash": "0x407e75ae35995cd8fc10f1057cd933569c2b87f827c6d49d0a902617e1ff2ccb", - "sourceCodeHash": "0xb2da0725bcef3c54c0f6cb74ce2cabaa54526447fbbb40b1a6e1f75f97b6cc90" + "sourceCodeHash": "0xaadd2fc373a6123f64b43baa5cae86b48e2f34540d7e57e9922aab3366a5a938" }, "src/L1/SystemConfig.sol": { "initCodeHash": "0x98c1049952199f55ae63e34ec61a839d43bde52b0892c482ae4246d0c088e826", @@ -152,8 +152,8 @@ "sourceCodeHash": "0x62c820b22c72399efd7688dcf713c34a6ee6821835ec66d5e7b98f33bbbfb209" }, "src/cannon/MIPS64.sol": { - "initCodeHash": "0x7889eff8538b652c2008beb523dc8db7ef52c9af8b6da38adab8e8288bfb7041", - "sourceCodeHash": "0xb710bd6d4844f9ee45f301bb815786619b5e2d6b2f85ae17f39bee4f414f1957" + "initCodeHash": "0x6c433a3fdba3af72d2d0399572e25b1b878ae53fb13cd237e2f4c1964b51644f", + "sourceCodeHash": "0x9d1af96777dc76b215aec7111c4fab2af4116dfefc734908e661ec641421dae2" }, "src/cannon/PreimageOracle.sol": { "initCodeHash": "0x17d3b3df1aaaf7a705b8d48de8a05e6511b910fdafdbe5eb7f7f95ec944fba9a", diff --git 
a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 27ca4bca876..d9ca5efaf22 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -111,14 +111,13 @@ contract OPContractsManager is ISemver { address disputeGameFactoryImpl; address anchorStateRegistryImpl; address delayedWETHImpl; - address mips64Impl; + address mipsImpl; } /// @notice The input required to identify a chain for upgrading, along with new prestate hashes struct OpChainConfig { ISystemConfig systemConfigProxy; IProxyAdmin proxyAdmin; - Claim absolutePrestate; } struct AddGameInput { @@ -144,9 +143,9 @@ contract OPContractsManager is ISemver { // -------- Constants and Variables -------- - /// @custom:semver 1.0.0 + /// @custom:semver 1.0.1 function version() public pure virtual returns (string memory) { - return "1.0.0"; + return "1.0.1"; } /// @notice Address of the SuperchainConfig contract shared by all chains. @@ -236,9 +235,6 @@ contract OPContractsManager is ISemver { /// @notice Thrown when the SuperchainProxyAdmin does not match the SuperchainConfig's admin. error SuperchainProxyAdminMismatch(); - /// @notice Thrown when a prestate is not set for a game. - error PrestateNotSet(); - // -------- Methods -------- constructor( @@ -348,7 +344,7 @@ contract OPContractsManager is ISemver { splitDepth: _input.disputeSplitDepth, clockExtension: _input.disputeClockExtension, maxClockDuration: _input.disputeMaxClockDuration, - vm: IBigStepper(implementation.mips64Impl), + vm: IBigStepper(implementation.mipsImpl), weth: IDelayedWETH(payable(address(output.delayedWETHPermissionedGameProxy))), anchorStateRegistry: IAnchorStateRegistry(address(output.anchorStateRegistryProxy)), l2ChainId: _input.l2ChainId @@ -1108,11 +1104,7 @@ contract OPContractsManager is ISemver { // Modify the params with the new anchorStateRegistry and vm values. 
params.anchorStateRegistry = IAnchorStateRegistry(address(_newAnchorStateRegistryProxy)); - params.vm = IBigStepper(_implementations.mips64Impl); - if (Claim.unwrap(_opChainConfig.absolutePrestate) == bytes32(0)) { - revert PrestateNotSet(); - } - params.absolutePrestate = _opChainConfig.absolutePrestate; + params.vm = IBigStepper(_implementations.mipsImpl); IDisputeGame newGame; if (GameType.unwrap(_gameType) == GameType.unwrap(GameTypes.PERMISSIONED_CANNON)) { diff --git a/packages/contracts-bedrock/src/L1/SuperchainConfig.sol b/packages/contracts-bedrock/src/L1/SuperchainConfig.sol index eb13407f535..36c26bb1a40 100644 --- a/packages/contracts-bedrock/src/L1/SuperchainConfig.sol +++ b/packages/contracts-bedrock/src/L1/SuperchainConfig.sol @@ -11,7 +11,7 @@ import { Storage } from "src/libraries/Storage.sol"; import { ISemver } from "interfaces/universal/ISemver.sol"; /// @custom:proxied true -/// @custom:audit none This contracts is not yet audited. +/// @custom:audit none This contract is not yet audited. /// @title SuperchainConfig /// @notice The SuperchainConfig contract is used to manage configuration of global superchain values. contract SuperchainConfig is Initializable, ISemver { diff --git a/packages/contracts-bedrock/src/L1/SuperchainConfigInterop.sol b/packages/contracts-bedrock/src/L1/SuperchainConfigInterop.sol index f23cc6212fe..26860cab4ae 100644 --- a/packages/contracts-bedrock/src/L1/SuperchainConfigInterop.sol +++ b/packages/contracts-bedrock/src/L1/SuperchainConfigInterop.sol @@ -15,7 +15,7 @@ import { ISharedLockbox } from "interfaces/L1/ISharedLockbox.sol"; import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; /// @custom:proxied true -/// @custom:audit none This contracts is not yet audited. +/// @custom:audit none These contracts are not yet audited. /// @title SuperchainConfigInterop /// @notice The SuperchainConfig contract is used to manage configuration of global superchain values. 
/// The interop version of the contract adds the ability to add dependencies to the dependency set diff --git a/packages/contracts-bedrock/src/cannon/MIPS64.sol b/packages/contracts-bedrock/src/cannon/MIPS64.sol index 564f7bcddfd..f206873fca1 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS64.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS64.sol @@ -63,8 +63,8 @@ contract MIPS64 is ISemver { } /// @notice The semantic version of the MIPS64 contract. - /// @custom:semver 1.0.0 - string public constant version = "1.0.0"; + /// @custom:semver 1.0.0-beta.1 + string public constant version = "1.0.0-beta.1"; /// @notice The preimage oracle contract. IPreimageOracle internal immutable ORACLE; diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol index 88506c3590a..f33a5370555 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol @@ -252,7 +252,8 @@ library MIPS64Instructions { // sll if (_fun == 0x00) { - return signExtend((_rt & U32_MASK) << ((_insn >> 6) & 0x1F), 32); + uint32 shiftAmt = (_insn >> 6) & 0x1F; + return signExtend((_rt << shiftAmt) & U32_MASK, 32); } // srl else if (_fun == 0x02) { @@ -265,7 +266,8 @@ library MIPS64Instructions { } // sllv else if (_fun == 0x04) { - return signExtend((_rt & U32_MASK) << (_rs & 0x1F), 32); + uint64 shiftAmt = _rs & 0x1F; + return signExtend((_rt << shiftAmt) & U32_MASK, 32); } // srlv else if (_fun == 0x6) { diff --git a/packages/contracts-bedrock/src/dispute/lib/Types.sol b/packages/contracts-bedrock/src/dispute/lib/Types.sol index 7faad6be57d..9c3648a8b9a 100644 --- a/packages/contracts-bedrock/src/dispute/lib/Types.sol +++ b/packages/contracts-bedrock/src/dispute/lib/Types.sol @@ -70,6 +70,9 @@ library GameTypes { /// @notice A dispute game type that uses an alphabet vm. 
/// Not intended for production use. GameType internal constant ALPHABET = GameType.wrap(255); + + /// @notice A dispute game type that uses RISC Zero's Kailua + GameType internal constant KAILUA = GameType.wrap(1337); } /// @title VMStatuses diff --git a/packages/contracts-bedrock/src/libraries/Predeploys.sol b/packages/contracts-bedrock/src/libraries/Predeploys.sol index 5385fbb33f8..c956635f024 100644 --- a/packages/contracts-bedrock/src/libraries/Predeploys.sol +++ b/packages/contracts-bedrock/src/libraries/Predeploys.sol @@ -161,8 +161,6 @@ library Predeploys { || _addr == L1_FEE_VAULT || _addr == SCHEMA_REGISTRY || _addr == EAS || _addr == GOVERNANCE_TOKEN || (_useInterop && _addr == CROSS_L2_INBOX) || (_useInterop && _addr == L2_TO_L2_CROSS_DOMAIN_MESSENGER) || (_useInterop && _addr == SUPERCHAIN_WETH) || (_useInterop && _addr == ETH_LIQUIDITY) - || (_useInterop && _addr == OPTIMISM_SUPERCHAIN_ERC20_FACTORY) - || (_useInterop && _addr == OPTIMISM_SUPERCHAIN_ERC20_BEACON) || (_useInterop && _addr == SUPERCHAIN_TOKEN_BRIDGE); } diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index e4b0d058f65..26c7c9ddd62 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -212,7 +212,6 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { IProxyAdmin superchainProxyAdmin; address upgrader; IOPContractsManager.OpChainConfig[] opChainConfigs; - Claim absolutePrestate; function setUp() public virtual override { super.disableUpgradedFork(); @@ -222,7 +221,10 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { vm.skip(true); } - absolutePrestate = Claim.wrap(bytes32(keccak256("absolutePrestate"))); + skipIfOpsRepoTest( + "OPContractsManager_Upgrade_Harness: cannot test upgrade on superchain ops repo upgrade tests" + ); + proxyAdmin = 
IProxyAdmin(EIP1967Helper.getAdmin(address(systemConfig))); superchainProxyAdmin = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))); upgrader = proxyAdmin.owner(); @@ -232,11 +234,7 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { vm.etch(upgrader, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); opChainConfigs.push( - IOPContractsManager.OpChainConfig({ - systemConfigProxy: systemConfig, - proxyAdmin: proxyAdmin, - absolutePrestate: absolutePrestate - }) + IOPContractsManager.OpChainConfig({ systemConfigProxy: systemConfig, proxyAdmin: proxyAdmin }) ); // Retrieve the l2ChainId, which was read from the superchain-registry, and saved in Artifacts @@ -255,7 +253,7 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { emit Upgraded(impl); } - function runUpgradeTestAndChecks(address _delegateCaller, bool _superchainUpgrade) public { + function runUpgradeTestAndChecks(address _delegateCaller) public { vm.etch(_delegateCaller, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); IOPContractsManager.Implementations memory impls = opcm.implementations(); @@ -297,7 +295,6 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { vm.expectEmit(address(_delegateCaller)); emit Upgraded(l2ChainId, opChainConfigs[0].systemConfigProxy, address(_delegateCaller)); - superchainProxyAdmin = _superchainUpgrade ? superchainProxyAdmin : IProxyAdmin(address(0)); DelegateCaller(_delegateCaller).dcForward( address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs)) ); @@ -319,22 +316,22 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { assertEq(impls.delayedWETHImpl, EIP1967Helper.getImplementation(address(delayedWETHPermissionedGameProxy))); // Check that the PermissionedDisputeGame is upgraded to the expected version, references - // the correct anchor state and has the mips64impl. + // the correct anchor state and has the mipsImpl. 
IPermissionedDisputeGame pdg = IPermissionedDisputeGame(address(disputeGameFactory.gameImpls(GameTypes.PERMISSIONED_CANNON))); assertEq(ISemver(address(pdg)).version(), "1.4.0"); assertEq(address(pdg.anchorStateRegistry()), address(newAnchorStateRegistryProxy)); - assertEq(address(pdg.vm()), impls.mips64Impl); + assertEq(address(pdg.vm()), impls.mipsImpl); if (address(delayedWeth) != address(0)) { // Check that the PermissionlessDisputeGame is upgraded to the expected version, references - // the correct anchor state and has the mips64impl. + // the correct anchor state and has the mipsImpl. assertEq(impls.delayedWETHImpl, EIP1967Helper.getImplementation(address(delayedWeth))); // Check that the PermissionlessDisputeGame is upgraded to the expected version IFaultDisputeGame fdg = IFaultDisputeGame(address(disputeGameFactory.gameImpls(GameTypes.CANNON))); assertEq(ISemver(address(fdg)).version(), "1.4.0"); assertEq(address(fdg.anchorStateRegistry()), address(newAnchorStateRegistryProxy)); - assertEq(address(fdg.vm()), impls.mips64Impl); + assertEq(address(fdg.vm()), impls.mipsImpl); } } } @@ -346,7 +343,7 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { expectEmitUpgraded(impls.superchainConfigImpl, address(superchainConfig)); expectEmitUpgraded(impls.protocolVersionsImpl, address(protocolVersions)); - runUpgradeTestAndChecks(upgrader, true); + runUpgradeTestAndChecks(upgrader); assertEq(impls.superchainConfigImpl, EIP1967Helper.getImplementation(address(superchainConfig))); assertEq(impls.protocolVersionsImpl, EIP1967Helper.getImplementation(address(protocolVersions))); @@ -354,7 +351,7 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { function test_upgradeOPChainOnly_succeeds() public { // Run the upgrade test and checks - runUpgradeTestAndChecks(upgrader, false); + runUpgradeTestAndChecks(upgrader); } function test_isRcFalseAfterCalledByUpgrader_works() public { @@ -362,7 +359,7 @@ contract 
OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { bytes memory releaseBytes = bytes(opcm.l1ContractsRelease()); assertEq(Bytes.slice(releaseBytes, releaseBytes.length - 3, 3), "-rc", "release should end with '-rc'"); - runUpgradeTestAndChecks(upgrader, false); + runUpgradeTestAndChecks(upgrader); assertFalse(opcm.isRC(), "isRC should be false"); releaseBytes = bytes(opcm.l1ContractsRelease()); @@ -398,7 +395,7 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { ); // Run the upgrade test and checks - runUpgradeTestAndChecks(_nonUpgradeController, false); + runUpgradeTestAndChecks(_nonUpgradeController); } } @@ -449,12 +446,6 @@ contract OPContractsManager_Upgrade_TestFails is OPContractsManager_Upgrade_Harn address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs)) ); } - - function test_upgrade_absolutePrestateNotSet_reverts() public { - opChainConfigs[0].absolutePrestate = Claim.wrap(bytes32(0)); - vm.expectRevert(IOPContractsManager.PrestateNotSet.selector); - DelegateCaller(upgrader).dcForward(address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs))); - } } contract OPContractsManager_SetRC_Test is OPContractsManager_Upgrade_Harness { @@ -529,7 +520,7 @@ contract OPContractsManager_AddGameType_Test is Test { disputeGameFactoryImpl: DeployUtils.create1("DisputeGameFactory"), anchorStateRegistryImpl: DeployUtils.create1("AnchorStateRegistry"), delayedWETHImpl: DeployUtils.create1("DelayedWETH", abi.encode(3)), - mips64Impl: DeployUtils.create1("MIPS64", abi.encode(oracle)) + mipsImpl: DeployUtils.create1("MIPS64", abi.encode(oracle)) }); vm.etch(address(superchainConfigProxy), hex"01"); @@ -692,7 +683,7 @@ contract OPContractsManager_AddGameType_Test is Test { disputeClockExtension: Duration.wrap(10800), disputeMaxClockDuration: Duration.wrap(302400), initialBond: 1 ether, - vm: IBigStepper(address(opcm.implementations().mips64Impl)), + vm: 
IBigStepper(address(opcm.implementations().mipsImpl)), permissioned: permissioned }); } diff --git a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol index c41a16686d2..fbae6b41524 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol @@ -211,6 +211,13 @@ contract SystemConfig_Initialize_TestFail is SystemConfig_Initialize_Test { } contract SystemConfig_Init_ResourceConfig is SystemConfig_Init { + function setUp() public virtual override { + super.setUp(); + skipIfOpsRepoTest( + "SystemConfig_Init_ResourceConfig: cannot test initialization on superchain ops repo upgrade tests" + ); + } + /// @dev Tests that `setResourceConfig` reverts if the min base fee /// is greater than the maximum allowed base fee. function test_setResourceConfig_badMinMax_reverts() external { diff --git a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol index 26556a4eed8..c648e5b110f 100644 --- a/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol +++ b/packages/contracts-bedrock/test/L2/CrossL2Inbox.t.sol @@ -56,7 +56,7 @@ contract CrossL2InboxTest is CommonTest { // Expect a revert with the NoExecutingDeposits selector vm.expectRevert(NoExecutingDeposits.selector); - // Call the executeMessage function + // Call the validateMessage function crossL2Inbox.validateMessage(_id, _messageHash); } } diff --git a/packages/contracts-bedrock/test/L2/L2Genesis.t.sol b/packages/contracts-bedrock/test/L2/L2Genesis.t.sol index 2398ac332df..f445c1758c5 100644 --- a/packages/contracts-bedrock/test/L2/L2Genesis.t.sol +++ b/packages/contracts-bedrock/test/L2/L2Genesis.t.sol @@ -139,8 +139,8 @@ contract L2GenesisTest is Test { // 2 predeploys do not have proxies assertEq(getCodeCount(_path, "Proxy.sol:Proxy"), Predeploys.PREDEPLOY_COUNT - 2); - // 24 proxies have the implementation set if useInterop is true and 17 
if useInterop is false - assertEq(getPredeployCountWithSlotSet(_path, Constants.PROXY_IMPLEMENTATION_ADDRESS), _useInterop ? 24 : 17); + // 22 proxies have the implementation set if useInterop is true and 17 if useInterop is false + assertEq(getPredeployCountWithSlotSet(_path, Constants.PROXY_IMPLEMENTATION_ADDRESS), _useInterop ? 22 : 17); // All proxies except 2 have the proxy 1967 admin slot set to the proxy admin assertEq( diff --git a/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol b/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol index 39e84556b35..8bd51e59ec4 100644 --- a/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol +++ b/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol @@ -19,6 +19,9 @@ contract L2StandardBridgeInterop_Test is CommonTest { /// @notice Test setup. function setUp() public virtual override { + // Skip the test until L2StandardBridgeInterop is integrated again + vm.skip(true); + super.enableInterop(); super.setUp(); } diff --git a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Beacon.t.sol b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Beacon.t.sol index 8a6a7701469..d0cff50b707 100644 --- a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Beacon.t.sol +++ b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Beacon.t.sol @@ -13,6 +13,9 @@ import { IBeacon } from "@openzeppelin/contracts/proxy/beacon/IBeacon.sol"; contract OptimismSuperchainERC20BeaconTest is CommonTest { /// @notice Sets up the test suite. 
function setUp() public override { + // Skip the test until OptimismSuperchainERC20Beacon is integrated again + vm.skip(true); + super.enableInterop(); super.setUp(); } diff --git a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol index a2f7125fc21..6ea494a4491 100644 --- a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol +++ b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20Factory.t.sol @@ -22,6 +22,9 @@ contract OptimismSuperchainERC20FactoryTest is CommonTest { /// @notice Sets up the test suite. function setUp() public override { + // Skip the test until OptimismSuperchainERC20Factory is integrated again + vm.skip(true); + super.enableInterop(); super.setUp(); } diff --git a/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol b/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol index 2a63961ce41..84cbf27e061 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol @@ -1,23 +1,24 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.15; +pragma solidity 0.8.25; // Testing utilities -import { CommonTest } from "test/setup/CommonTest.sol"; +import { Test } from "forge-std/Test.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; import { IL2ToL2CrossDomainMessenger } from "interfaces/L2/IL2ToL2CrossDomainMessenger.sol"; // Target contract +import { SuperchainTokenBridge } from "src/L2/SuperchainTokenBridge.sol"; import { ISuperchainTokenBridge } from "interfaces/L2/ISuperchainTokenBridge.sol"; import { ISuperchainERC20 } from "interfaces/L2/ISuperchainERC20.sol"; -import { IOptimismSuperchainERC20Factory } from "interfaces/L2/IOptimismSuperchainERC20Factory.sol"; import { IERC20 } from "@openzeppelin/contracts/interfaces/IERC20.sol"; import { IERC7802 } from "interfaces/L2/IERC7802.sol"; +import { 
MockSuperchainERC20Implementation } from "test/mocks/SuperchainERC20Implementation.sol"; /// @title SuperchainTokenBridgeTest /// @notice Contract for testing the SuperchainTokenBridge contract. -contract SuperchainTokenBridgeTest is CommonTest { +contract SuperchainTokenBridgeTest is Test { address internal constant ZERO_ADDRESS = address(0); string internal constant NAME = "SuperchainERC20"; string internal constant SYMBOL = "OSE"; @@ -32,17 +33,20 @@ contract SuperchainTokenBridgeTest is CommonTest { event RelayERC20(address indexed token, address indexed from, address indexed to, uint256 amount, uint256 source); ISuperchainERC20 public superchainERC20; + ISuperchainTokenBridge public superchainTokenBridge; /// @notice Sets up the test suite. - function setUp() public override { - super.enableInterop(); - super.setUp(); - - superchainERC20 = ISuperchainERC20( - IOptimismSuperchainERC20Factory(Predeploys.OPTIMISM_SUPERCHAIN_ERC20_FACTORY).deploy( - REMOTE_TOKEN, NAME, SYMBOL, 18 - ) - ); + function setUp() public { + vm.etch(Predeploys.SUPERCHAIN_TOKEN_BRIDGE, address(new SuperchainTokenBridge()).code); + superchainTokenBridge = ISuperchainTokenBridge(Predeploys.SUPERCHAIN_TOKEN_BRIDGE); + superchainERC20 = ISuperchainERC20(address(new MockSuperchainERC20Implementation())); + + // Skip the initialization until OptimismSuperchainERC20Factory is integrated again + // superchainERC20 = ISuperchainERC20( + // IOptimismSuperchainERC20Factory(Predeploys.OPTIMISM_SUPERCHAIN_ERC20_FACTORY).deploy( + // REMOTE_TOKEN, NAME, SYMBOL, 18 + // ) + // ); } /// @notice Helper function to setup a mock and expect a call to it. 
diff --git a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol index f88de3b338e..d2fc138e91f 100644 --- a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol @@ -1,15 +1,19 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Testing import { Test, stdStorage, StdStorage } from "forge-std/Test.sol"; + +// Libraries import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; +import { Chains } from "scripts/libraries/Chains.sol"; +// Interfaces import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; import { IMIPS } from "interfaces/cannon/IMIPS.sol"; import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; - import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; @@ -398,7 +402,7 @@ contract DeployImplementations_Test is Test { dio.checkOutput(dii); } - function testFuzz_run_largeChallengePeriodSeconds_reverts(uint256 _challengePeriodSeconds) public { + function setDefaults() internal { // Set the defaults. 
dii.set(dii.withdrawalDelaySeconds.selector, withdrawalDelaySeconds); dii.set(dii.minProposalSizeBytes.selector, minProposalSizeBytes); @@ -411,7 +415,10 @@ contract DeployImplementations_Test is Test { dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); dii.set(dii.superchainProxyAdmin.selector, address(superchainProxyAdmin)); + } + function testFuzz_run_largeChallengePeriodSeconds_reverts(uint256 _challengePeriodSeconds) public { + setDefaults(); // Set the challenge period to a value that is too large, using vm.store because the setter // method won't allow it. challengePeriodSeconds = bound(_challengePeriodSeconds, uint256(type(uint64).max) + 1, type(uint256).max); @@ -422,6 +429,19 @@ contract DeployImplementations_Test is Test { vm.expectRevert("DeployImplementationsInput: challengePeriodSeconds too large"); deployImplementations.run(dii, dio); } + + function test_run_deployMipsV1OnMainnetOrSepolia_reverts() public { + setDefaults(); + dii.set(dii.mipsVersion.selector, 2); + + vm.chainId(Chains.Mainnet); + vm.expectRevert("DeployImplementations: Only Mips32 should be deployed on Mainnet or Sepolia"); + deployImplementations.run(dii, dio); + + vm.chainId(Chains.Sepolia); + vm.expectRevert("DeployImplementations: Only Mips32 should be deployed on Mainnet or Sepolia"); + deployImplementations.run(dii, dio); + } } contract DeployImplementationsInterop_Test is DeployImplementations_Test { diff --git a/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol index e8b9b9fa3eb..f43f7c1322c 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPCM.t.sol @@ -75,7 +75,7 @@ contract DeployOPCMInput_Test is Test { dii.delayedWETHImpl(); vm.expectRevert("DeployOPCMInput: not set"); - dii.mips64Impl(); + dii.mipsImpl(); } // Below setter tests are split into 
two parts to avoid stack too deep errors @@ -131,7 +131,7 @@ contract DeployOPCMInput_Test is Test { address disputeGameFactoryImpl = makeAddr("disputeGameFactoryImpl"); address anchorStateRegistryImpl = makeAddr("anchorStateRegistryImpl"); address delayedWETHImpl = makeAddr("delayedWETHImpl"); - address mips64Impl = makeAddr("mips64Impl"); + address mipsImpl = makeAddr("mipsImpl"); dii.set(dii.l1ERC721BridgeImpl.selector, l1ERC721BridgeImpl); dii.set(dii.optimismPortalImpl.selector, optimismPortalImpl); @@ -142,7 +142,7 @@ contract DeployOPCMInput_Test is Test { dii.set(dii.disputeGameFactoryImpl.selector, disputeGameFactoryImpl); dii.set(dii.anchorStateRegistryImpl.selector, anchorStateRegistryImpl); dii.set(dii.delayedWETHImpl.selector, delayedWETHImpl); - dii.set(dii.mips64Impl.selector, mips64Impl); + dii.set(dii.mipsImpl.selector, mipsImpl); assertEq(dii.l1ERC721BridgeImpl(), l1ERC721BridgeImpl, "600"); assertEq(dii.optimismPortalImpl(), optimismPortalImpl, "650"); @@ -152,7 +152,7 @@ contract DeployOPCMInput_Test is Test { assertEq(dii.l1StandardBridgeImpl(), l1StandardBridgeImpl, "850"); assertEq(dii.disputeGameFactoryImpl(), disputeGameFactoryImpl, "900"); assertEq(dii.delayedWETHImpl(), delayedWETHImpl, "950"); - assertEq(dii.mips64Impl(), mips64Impl, "1000"); + assertEq(dii.mipsImpl(), mipsImpl, "1000"); } function test_set_withZeroAddress_reverts() public { @@ -253,7 +253,7 @@ contract DeployOPCMTest is Test { doi.set(doi.disputeGameFactoryImpl.selector, makeAddr("disputeGameFactoryImpl")); doi.set(doi.anchorStateRegistryImpl.selector, makeAddr("anchorStateRegistryImpl")); doi.set(doi.delayedWETHImpl.selector, makeAddr("delayedWETHImpl")); - doi.set(doi.mips64Impl.selector, makeAddr("mips64Impl")); + doi.set(doi.mipsImpl.selector, makeAddr("mipsImpl")); // Etch all addresses with dummy bytecode vm.etch(address(doi.superchainConfig()), hex"01"); @@ -276,7 +276,7 @@ contract DeployOPCMTest is Test { vm.etch(doi.l1StandardBridgeImpl(), hex"01"); 
vm.etch(doi.disputeGameFactoryImpl(), hex"01"); vm.etch(doi.delayedWETHImpl(), hex"01"); - vm.etch(doi.mips64Impl(), hex"01"); + vm.etch(doi.mipsImpl(), hex"01"); deployOPCM.run(doi, doo); diff --git a/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol b/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol new file mode 100644 index 00000000000..d9ddd6c16f2 --- /dev/null +++ b/packages/contracts-bedrock/test/opcm/UpgradeOPChain.t.sol @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +import { Test } from "forge-std/Test.sol"; + +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; + +import { OPContractsManager } from "src/L1/OPContractsManager.sol"; +import { UpgradeOPChain, UpgradeOPChainInput } from "scripts/deploy/UpgradeOPChain.s.sol"; + +contract UpgradeOPChainInput_Test is Test { + UpgradeOPChainInput input; + + function setUp() public { + input = new UpgradeOPChainInput(); + } + + function test_getters_whenNotSet_reverts() public { + vm.expectRevert("UpgradeOPCMInput: prank not set"); + input.prank(); + + vm.expectRevert("UpgradeOPCMInput: not set"); + input.opcm(); + + vm.expectRevert("UpgradeOPCMInput: not set"); + input.opChainConfigs(); + } + + function test_setAddress_succeeds() public { + address mockPrank = makeAddr("prank"); + address mockOPCM = makeAddr("opcm"); + + // Create mock contract at OPCM address + vm.etch(mockOPCM, hex"01"); + + input.set(input.prank.selector, mockPrank); + input.set(input.opcm.selector, mockOPCM); + + assertEq(input.prank(), mockPrank); + assertEq(address(input.opcm()), mockOPCM); + } + + function test_setOpChainConfigs_succeeds() public { + // Create sample OpChainConfig array + OPContractsManager.OpChainConfig[] memory configs = new OPContractsManager.OpChainConfig[](2); + + // Setup mock addresses and contracts for first config + address systemConfig1 = makeAddr("systemConfig1"); + address 
proxyAdmin1 = makeAddr("proxyAdmin1"); + vm.etch(systemConfig1, hex"01"); + vm.etch(proxyAdmin1, hex"01"); + + configs[0] = OPContractsManager.OpChainConfig({ + systemConfigProxy: ISystemConfig(systemConfig1), + proxyAdmin: IProxyAdmin(proxyAdmin1) + }); + + // Setup mock addresses and contracts for second config + address systemConfig2 = makeAddr("systemConfig2"); + address proxyAdmin2 = makeAddr("proxyAdmin2"); + vm.etch(systemConfig2, hex"01"); + vm.etch(proxyAdmin2, hex"01"); + + configs[1] = OPContractsManager.OpChainConfig({ + systemConfigProxy: ISystemConfig(systemConfig2), + proxyAdmin: IProxyAdmin(proxyAdmin2) + }); + + input.set(input.opChainConfigs.selector, configs); + + bytes memory storedConfigs = input.opChainConfigs(); + assertEq(storedConfigs, abi.encode(configs)); + } + + function test_setAddress_withZeroAddress_reverts() public { + vm.expectRevert("UpgradeOPCMInput: cannot set zero address"); + input.set(input.prank.selector, address(0)); + + vm.expectRevert("UpgradeOPCMInput: cannot set zero address"); + input.set(input.opcm.selector, address(0)); + } + + function test_setOpChainConfigs_withEmptyArray_reverts() public { + OPContractsManager.OpChainConfig[] memory emptyConfigs = new OPContractsManager.OpChainConfig[](0); + + vm.expectRevert("UpgradeOPCMInput: cannot set empty array"); + input.set(input.opChainConfigs.selector, emptyConfigs); + } + + function test_set_withInvalidSelector_reverts() public { + vm.expectRevert("UpgradeOPCMInput: unknown selector"); + input.set(bytes4(0xdeadbeef), makeAddr("test")); + + // Create a single config for testing invalid selector + OPContractsManager.OpChainConfig[] memory configs = new OPContractsManager.OpChainConfig[](1); + address mockSystemConfig = makeAddr("systemConfig"); + address mockProxyAdmin = makeAddr("proxyAdmin"); + vm.etch(mockSystemConfig, hex"01"); + vm.etch(mockProxyAdmin, hex"01"); + + configs[0] = OPContractsManager.OpChainConfig({ + systemConfigProxy: ISystemConfig(mockSystemConfig), + 
proxyAdmin: IProxyAdmin(mockProxyAdmin) + }); + + vm.expectRevert("UpgradeOPCMInput: unknown selector"); + input.set(bytes4(0xdeadbeef), configs); + } +} + +contract MockOPCM { + event UpgradeCalled(address indexed sysCfgProxy, address indexed proxyAdmin); + + function upgrade(OPContractsManager.OpChainConfig[] memory _opChainConfigs) public { + emit UpgradeCalled(address(_opChainConfigs[0].systemConfigProxy), address(_opChainConfigs[0].proxyAdmin)); + } +} + +contract UpgradeOPChain_Test is Test { + MockOPCM mockOPCM; + UpgradeOPChainInput uoci; + OPContractsManager.OpChainConfig config; + UpgradeOPChain upgradeOPChain; + address prank; + + event UpgradeCalled(address indexed sysCfgProxy, address indexed proxyAdmin); + + function setUp() public virtual { + mockOPCM = new MockOPCM(); + uoci = new UpgradeOPChainInput(); + uoci.set(uoci.opcm.selector, address(mockOPCM)); + config = OPContractsManager.OpChainConfig({ + systemConfigProxy: ISystemConfig(makeAddr("systemConfigProxy")), + proxyAdmin: IProxyAdmin(makeAddr("proxyAdmin")) + }); + OPContractsManager.OpChainConfig[] memory configs = new OPContractsManager.OpChainConfig[](1); + configs[0] = config; + uoci.set(uoci.opChainConfigs.selector, configs); + prank = makeAddr("prank"); + uoci.set(uoci.prank.selector, prank); + upgradeOPChain = new UpgradeOPChain(); + } + + function test_upgrade_succeeds() public { + // UpgradeCalled should be emitted by the prank since it's a delegate call. 
+ vm.expectEmit(true, true, false, false, address(prank)); + emit UpgradeCalled(address(config.systemConfigProxy), address(config.proxyAdmin)); + upgradeOPChain.run(uoci); + } +} diff --git a/packages/contracts-bedrock/test/setup/ForkLive.s.sol b/packages/contracts-bedrock/test/setup/ForkLive.s.sol index 4b29777c4b7..61b9e74f0fb 100644 --- a/packages/contracts-bedrock/test/setup/ForkLive.s.sol +++ b/packages/contracts-bedrock/test/setup/ForkLive.s.sol @@ -12,7 +12,7 @@ import { Deployer } from "scripts/deploy/Deployer.sol"; import { Deploy } from "scripts/deploy/Deploy.s.sol"; // Libraries -import { GameTypes, Claim } from "src/dispute/lib/Types.sol"; +import { GameTypes } from "src/dispute/lib/Types.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Interfaces @@ -37,6 +37,8 @@ import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.so contract ForkLive is Deployer { using stdToml for string; + bool public useOpsRepo; + /// @notice Returns the base chain name to use for forking /// @return The base chain name as a string function baseChain() internal view returns (string memory) { @@ -58,7 +60,7 @@ contract ForkLive is Deployer { function run() public { string memory superchainOpsAllocsPath = vm.envOr("SUPERCHAIN_OPS_ALLOCS_PATH", string("")); - bool useOpsRepo = bytes(superchainOpsAllocsPath).length > 0; + useOpsRepo = bytes(superchainOpsAllocsPath).length > 0; if (useOpsRepo) { console.log("ForkLive: loading state from %s", superchainOpsAllocsPath); // Set the resultant state from the superchain ops repo upgrades. @@ -72,17 +74,16 @@ contract ForkLive is Deployer { } else { // Read the superchain registry and save the addresses to the Artifacts contract. _readSuperchainRegistry(); - // Now deploy the updated OPCM and implementations of the contracts + // Now deploy the updated OPCM and implementations of the contracts. 
_deployNewImplementations(); } // Now upgrade the contracts (if the config is set to do so) - if (cfg.useUpgradedFork()) { - require(!useOpsRepo, "ForkLive: cannot upgrade and use ops repo"); + if (useOpsRepo) { + console.log("ForkLive: using ops repo to upgrade"); + } else if (cfg.useUpgradedFork()) { console.log("ForkLive: upgrading"); _upgrade(); - } else if (useOpsRepo) { - console.log("ForkLive: using ops repo to upgrade"); } } @@ -170,11 +171,7 @@ contract ForkLive is Deployer { vm.label(upgrader, "ProxyAdmin Owner"); IOPContractsManager.OpChainConfig[] memory opChains = new IOPContractsManager.OpChainConfig[](1); - opChains[0] = IOPContractsManager.OpChainConfig({ - systemConfigProxy: systemConfig, - proxyAdmin: proxyAdmin, - absolutePrestate: Claim.wrap(bytes32(keccak256("absolutePrestate"))) - }); + opChains[0] = IOPContractsManager.OpChainConfig({ systemConfigProxy: systemConfig, proxyAdmin: proxyAdmin }); // TODO Migrate from DelegateCaller to a Safe to reduce risk of mocks not properly // reflecting the production system. diff --git a/packages/contracts-bedrock/test/setup/Setup.sol b/packages/contracts-bedrock/test/setup/Setup.sol index 5700356d4a5..7a79fba6d2d 100644 --- a/packages/contracts-bedrock/test/setup/Setup.sol +++ b/packages/contracts-bedrock/test/setup/Setup.sol @@ -187,6 +187,14 @@ contract Setup { } } + /// @dev Skips tests when running against a forked production network using the superchain ops repo. + function skipIfOpsRepoTest(string memory message) public { + if (forkLive.useOpsRepo()) { + vm.skip(true); + console.log(string.concat("Skipping ops repo test: ", message)); + } + } + /// @dev Returns early when running against a forked production network. Useful for allowing a portion of a test /// to run. function returnIfForkTest(string memory message) public view {