diff --git a/.circleci/config.yml b/.circleci/config.yml index 9fbed7bde44..089b13c337f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -26,6 +26,9 @@ parameters: reproducibility_dispatch: type: boolean default: false + diff_asterisc_bytecode_dispatch: + type: boolean + default: false kontrol_dispatch: type: boolean default: false @@ -41,6 +44,9 @@ parameters: publish_contract_artifacts_dispatch: type: boolean default: false + stale_check_dispatch: + type: boolean + default: false orbs: go: circleci/go@1.8.0 @@ -239,6 +245,43 @@ jobs: command: python3 maketests.py && git diff --exit-code working_directory: cannon/mipsevm/tests/open_mips_tests + diff-asterisc-bytecode: + docker: + - image: <> + resource_class: medium + steps: + - checkout + - run: + name: Check `RISCV.sol` bytecode + working_directory: packages/contracts-bedrock + command: | + # Clone asterisc @ the pinned version to fetch remote `RISCV.sol` + ASTERISC_REV="$(cat ../../versions.json | jq -r .asterisc)" + REMOTE_ASTERISC_PATH="./src/asterisc/RISCV_Remote.sol" + git clone https://github.com/ethereum-optimism/asterisc \ + -b $ASTERISC_REV && \ + cp ./asterisc/rvsol/src/RISCV.sol $REMOTE_ASTERISC_PATH + + # Replace import paths + sed -i -e 's/@optimism\///' $REMOTE_ASTERISC_PATH + # Replace contract name + sed -i -e 's/contract RISCV/contract RISCV_Remote/' $REMOTE_ASTERISC_PATH + + # Install deps + forge install + + # Diff bytecode, with both contracts compiled in the local environment. + REMOTE_ASTERISC_CODE="$(forge inspect RISCV_Remote bytecode | tr -d '\n')" + LOCAL_ASTERISC_CODE="$(forge inspect RISCV bytecode | tr -d '\n')" + if [ "$REMOTE_ASTERISC_CODE" != "$LOCAL_ASTERISC_CODE" ]; then + echo "Asterisc bytecode mismatch. Local version does not match remote. Diff:" + diff <(echo "$REMOTE_ASTERISC_CODE") <(echo "$LOCAL_ASTERISC_CODE") + else + echo "Asterisc version up to date." 
+ fi + - notify-failures-on-develop: + mentions: "@clabby @proofs-team" + contracts-bedrock-build: machine: true resource_class: ethereum-optimism/latitude-1 @@ -347,7 +390,7 @@ jobs: machine: image: <> resource_class: "<>" - docker_layer_caching: true # we rely on this for faster builds, and actively warm it up for builds with common stages + docker_layer_caching: true # we rely on this for faster builds, and actively warm it up for builds with common stages steps: - checkout - attach_workspace: @@ -458,7 +501,7 @@ jobs: docker save -o /tmp/docker_images/<>.tar $IMAGE_NAME - persist_to_workspace: root: /tmp/docker_images - paths: # only write the one file, to avoid concurrent workspace-file additions + paths: # only write the one file, to avoid concurrent workspace-file additions - "<>.tar" - when: condition: "<>" @@ -471,11 +514,11 @@ jobs: condition: or: - and: - - "<>" - - "<>" + - "<>" + - "<>" - and: - - "<>" - - equal: [develop, << pipeline.git.branch >>] + - "<>" + - equal: [develop, << pipeline.git.branch >>] steps: - gcp-oidc-authenticate: service_account_email: GCP_SERVICE_ATTESTOR_ACCOUNT_EMAIL @@ -867,6 +910,7 @@ jobs: export ENABLE_ANVIL=true export SEPOLIA_RPC_URL="https://ci-sepolia-l1-archive.optimism.io" + export MAINNET_RPC_URL="https://ci-mainnet-l1-archive.optimism.io" gotestsum --format=testname \ --junitfile=./tmp/test-results/results.xml \ @@ -1000,8 +1044,7 @@ jobs: steps: - checkout - setup_remote_docker - - run: - make -C op-program verify-reproducibility + - run: make -C op-program verify-reproducibility - notify-failures-on-develop: mentions: "@proofs-team" @@ -1053,7 +1096,7 @@ jobs: - checkout - unless: condition: - equal: [ "develop", << pipeline.git.branch >> ] + equal: ["develop", << pipeline.git.branch >>] steps: - run: # Scan changed files in PRs, block on new issues only (existing issues ignored) @@ -1109,7 +1152,7 @@ jobs: paths: - "/go/pkg/mod" - bedrock-go-tests: # just a helper, that depends on all the actual test jobs + 
bedrock-go-tests: # just a helper, that depends on all the actual test jobs docker: # Use a smaller base image to avoid pulling the huge ci-builder # image which is not needed for this job and sometimes misses @@ -1271,17 +1314,29 @@ jobs: command: | goreleaser release --clean -f ./<>/<> + stale-check: + docker: + - image: cimg/python:3.11 + steps: + - run: + name: Run Stale Check Script + command: | + git clone --branch main --depth 1 https://github.com/ethereum-optimism/circleci-utils.git /tmp/circleci-utils + cd /tmp/circleci-utils/stale-check + pip3 install -r requirements.txt + python3 stale-check.py --repo "ethereum-optimism/${CIRCLE_PROJECT_REPONAME}" --github-token "${STALE_GITHUB_TOKEN}" + workflows: main: when: and: - or: # Trigger on new commits - - equal: [ webhook, << pipeline.trigger_source >> ] + - equal: [webhook, << pipeline.trigger_source >>] # Trigger on manual triggers if explicitly requested - - equal: [ true, << pipeline.parameters.main_dispatch >> ] + - equal: [true, << pipeline.parameters.main_dispatch >>] - not: - equal: [ scheduled_pipeline, << pipeline.trigger_source >> ] + equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: - go-mod-download - contracts-bedrock-build: @@ -1304,7 +1359,7 @@ workflows: # Heavily fuzz any fuzz tests within added or modified test files. 
name: contracts-bedrock-tests-heavy-fuzz-modified test_parallelism: 1 - test_list: git diff origin/develop...HEAD --name-only -- './test/**/*.t.sol' | sed 's|packages/contracts-bedrock/||' + test_list: git diff origin/develop...HEAD --name-only --diff-filter=AM -- './test/**/*.t.sol' | sed 's|packages/contracts-bedrock/||' test_timeout: 1h test_profile: ciheavy - contracts-bedrock-coverage @@ -1425,7 +1480,7 @@ workflows: notify: true matrix: parameters: - mips_word_size: [ 32, 64 ] + mips_word_size: [32, 64] - cannon-build-test-vectors - todo-issues: name: todo-issues-check @@ -1434,8 +1489,7 @@ workflows: name: shell-check # We don't need the `exclude` key as the orb detects the `.shellcheckrc` dir: . - ignore-dirs: - ./packages/contracts-bedrock/lib + ignore-dirs: ./packages/contracts-bedrock/lib go-release-deployer: jobs: @@ -1452,7 +1506,7 @@ workflows: release: when: not: - equal: [ scheduled_pipeline, << pipeline.trigger_source >> ] + equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: # Wait for approval on the release - hold: @@ -1545,7 +1599,7 @@ workflows: scheduled-todo-issues: when: - equal: [ build_four_hours, <> ] + equal: [build_four_hours, <>] jobs: - todo-issues: name: todo-issue-checks @@ -1554,7 +1608,7 @@ workflows: scheduled-fpp: when: - equal: [ build_hourly, <> ] + equal: [build_hourly, <>] jobs: - fpp-verify: context: @@ -1564,8 +1618,9 @@ workflows: develop-publish-contract-artifacts: when: or: - - equal: [ "develop", <> ] - - equal: [ true, <> ] + - equal: ["develop", <>] + - equal: + [true, <>] jobs: - publish-contract-artifacts @@ -1573,10 +1628,10 @@ workflows: when: and: - or: - - equal: [ "develop", <> ] - - equal: [ true, <> ] + - equal: ["develop", <>] + - equal: [true, <>] - not: - equal: [ scheduled_pipeline, << pipeline.trigger_source >> ] + equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: - go-mod-download - cannon-prestate @@ -1606,10 +1661,10 @@ workflows: when: and: - or: - - equal: [ "develop", 
<> ] - - equal: [ true, <> ] + - equal: ["develop", <>] + - equal: [true, <>] - not: - equal: [ scheduled_pipeline, << pipeline.trigger_source >> ] + equal: [scheduled_pipeline, << pipeline.trigger_source >>] jobs: - kontrol-tests: context: @@ -1619,8 +1674,8 @@ workflows: scheduled-cannon-full-tests: when: or: - - equal: [ build_four_hours, <> ] - - equal: [ true, << pipeline.parameters.cannon_full_test_dispatch >> ] + - equal: [build_four_hours, <>] + - equal: [true, << pipeline.parameters.cannon_full_test_dispatch >>] jobs: - contracts-bedrock-build: skip_pattern: test @@ -1632,14 +1687,14 @@ workflows: - slack matrix: parameters: - mips_word_size: [ 32, 64 ] + mips_word_size: [32, 64] scheduled-docker-publish: when: or: - - equal: [ build_hourly, <> ] + - equal: [build_hourly, <>] # Trigger on manual triggers if explicitly requested - - equal: [ true, << pipeline.parameters.docker_publish_dispatch >> ] + - equal: [true, << pipeline.parameters.docker_publish_dispatch >>] jobs: - docker-build: matrix: @@ -1687,13 +1742,33 @@ workflows: - oplabs-gcr - slack + scheduled-diff-asterisc-bytecode: + when: + or: + - equal: [build_daily, <>] + # Trigger on manual triggers if explicitly requested + - equal: [true, <>] + jobs: + - diff-asterisc-bytecode: + context: + - slack + scheduled-preimage-reproducibility: when: or: - - equal: [build_daily, <> ] + - equal: [build_daily, <>] # Trigger on manual triggers if explicitly requested - - equal: [ true, << pipeline.parameters.reproducibility_dispatch >> ] + - equal: [true, << pipeline.parameters.reproducibility_dispatch >>] jobs: - preimage-reproducibility: - context: - slack + context: slack + + scheduled-stale-check: + when: + or: + - equal: [build_daily, <>] + # Trigger on manual triggers if explicitly requested + - equal: [true, << pipeline.parameters.stale_check_dispatch >>] + jobs: + - stale-check: + context: github-token-stale-check diff --git a/.github/workflows/close-stale.yml b/.github/workflows/close-stale.yml 
deleted file mode 100644 index 68e8b4ec82c..00000000000 --- a/.github/workflows/close-stale.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: 'Close stale issues and PRs' -on: - schedule: - - cron: '30 1 * * *' - -jobs: - stale: - runs-on: ubuntu-latest - steps: - - uses: actions/stale@v9 - with: - stale-pr-message: 'This PR is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 5 days.' - stale-issue-label: 'S-stale' - exempt-pr-labels: 'S-exempt-stale' - days-before-issue-stale: 999 - days-before-pr-stale: 14 - days-before-close: 5 - repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/tag-service.yml b/.github/workflows/tag-service.yml deleted file mode 100644 index 439b48f13d4..00000000000 --- a/.github/workflows/tag-service.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Tag Service - -on: - workflow_dispatch: - inputs: - bump: - description: 'How much to bump the version by' - required: true - type: choice - options: - - major - - minor - - patch - - prerelease - - finalize-prerelease - service: - description: 'Which service to release' - required: true - type: choice - options: - - ci-builder - - ci-builder-rust - - op-node - - op-batcher - - op-proposer - - op-challenger - - op-program - - op-dispute-mon - - op-ufm - - da-server - - op-contracts - - op-conductor - prerelease: - description: Increment major/minor/patch as prerelease? 
- required: false - type: boolean - default: false - -permissions: - contents: write - -jobs: - release: - runs-on: ubuntu-latest - environment: op-stack-production - - steps: - - uses: actions/checkout@v4 - - name: Fetch tags - run: git fetch --tags origin --force - - name: Setup Python 3.10 - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - name: Install deps - run: pip install -r requirements.txt - working-directory: ops/tag-service - - run: ops/tag-service/tag-service.py --bump="$BUMP" --service="$SERVICE" - env: - INPUT_GITHUB_TOKEN: ${{ github.token }} - BUMP: ${{ github.event.inputs.bump }} - SERVICE: ${{ github.event.inputs.service }} - if: ${{ github.event.inputs.prerelease == 'false' }} - - run: ops/tag-service/tag-service.py --bump="$BUMP" --service="$SERVICE" --pre-release - env: - INPUT_GITHUB_TOKEN: ${{ github.token }} - BUMP: ${{ github.event.inputs.bump }} - SERVICE: ${{ github.event.inputs.service }} - if: ${{ github.event.inputs.prerelease == 'true' }} diff --git a/Makefile b/Makefile index 3f6f759c59f..9830a3060cb 100644 --- a/Makefile +++ b/Makefile @@ -147,8 +147,8 @@ cannon-prestate: op-program cannon ## Generates prestate using cannon and op-pro mv op-program/bin/0.json op-program/bin/prestate-proof.json .PHONY: cannon-prestate -cannon-prestate-mt: op-program cannon ## Generates prestate using cannon and op-program in the multithreaded cannon format - ./cannon/bin/cannon load-elf --type multithreaded --path op-program/bin/op-program-client.elf --out op-program/bin/prestate-mt.bin.gz --meta op-program/bin/meta-mt.json +cannon-prestate-mt: op-program cannon ## Generates prestate using cannon and op-program in the multithreaded64 cannon format + ./cannon/bin/cannon load-elf --type multithreaded64 --path op-program/bin/op-program-client64.elf --out op-program/bin/prestate-mt.bin.gz --meta op-program/bin/meta-mt.json ./cannon/bin/cannon run --proof-at '=0' --stop-at '=1' --input op-program/bin/prestate-mt.bin.gz --meta 
op-program/bin/meta-mt.json --proof-fmt 'op-program/bin/%d-mt.json' --output "" mv op-program/bin/0-mt.json op-program/bin/prestate-proof-mt.json .PHONY: cannon-prestate-mt diff --git a/cannon/mipsevm/exec/mips_instructions.go b/cannon/mipsevm/exec/mips_instructions.go index c8e27e63df0..e0d91893d1a 100644 --- a/cannon/mipsevm/exec/mips_instructions.go +++ b/cannon/mipsevm/exec/mips_instructions.go @@ -169,7 +169,7 @@ func SignExtendImmediate(insn uint32) Word { func assertMips64(insn uint32) { if arch.IsMips32 { - panic(fmt.Sprintf("invalid instruction: %x", insn)) + panic(fmt.Sprintf("invalid instruction: 0x%08x", insn)) } } @@ -327,7 +327,7 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem assertMips64(insn) return Word(int64(rt) >> (((insn >> 6) & 0x1f) + 32)) default: - panic(fmt.Sprintf("invalid instruction: %x", insn)) + panic(fmt.Sprintf("invalid instruction: 0x%08x", insn)) } } else { switch opcode { diff --git a/cannon/mipsevm/multithreaded/instrumented_test.go b/cannon/mipsevm/multithreaded/instrumented_test.go index ec079e50c8c..6add2cc020a 100644 --- a/cannon/mipsevm/multithreaded/instrumented_test.go +++ b/cannon/mipsevm/multithreaded/instrumented_test.go @@ -34,29 +34,178 @@ func TestInstrumentedState_Claim(t *testing.T) { testutil.RunVMTest_Claim(t, CreateInitialState, vmFactory, false) } -func TestInstrumentedState_MultithreadedProgram(t *testing.T) { +func TestInstrumentedState_UtilsCheck(t *testing.T) { + // Sanity check that test running utilities will return a non-zero exit code on failure t.Parallel() - state, _ := testutil.LoadELFProgram(t, testutil.ProgramPath("multithreaded"), CreateInitialState, false) - oracle := testutil.StaticOracle(t, []byte{}) - - var stdOutBuf, stdErrBuf bytes.Buffer - us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), nil) - for i := 0; i < 2_000_000; i++ { - if us.GetState().GetExited() { - 
break - } - _, err := us.Step(false) - require.NoError(t, err) + cases := []struct { + name string + expectedOutput string + }{ + {name: "utilscheck", expectedOutput: "Test failed: ShouldFail"}, + {name: "utilscheck2", expectedOutput: "Test failed: ShouldFail (subtest 2)"}, + {name: "utilscheck3", expectedOutput: "Test panicked: ShouldFail (panic test)"}, + {name: "utilscheck4", expectedOutput: "Test panicked: ShouldFail"}, } - t.Logf("Completed in %d steps", state.Step) - require.True(t, state.Exited, "must complete program") - require.Equal(t, uint8(0), state.ExitCode, "exit with 0") - require.Contains(t, "waitgroup result: 42", stdErrBuf.String()) - require.Contains(t, "channels result: 1234", stdErrBuf.String()) - require.Equal(t, "", stdErrBuf.String(), "should not print any errors") + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + state, meta := testutil.LoadELFProgram(t, testutil.ProgramPath(c.name), CreateInitialState, false) + oracle := testutil.StaticOracle(t, []byte{}) + + var stdOutBuf, stdErrBuf bytes.Buffer + us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), meta) + + for i := 0; i < 1_000_000; i++ { + if us.GetState().GetExited() { + break + } + _, err := us.Step(false) + require.NoError(t, err) + } + t.Logf("Completed in %d steps", state.Step) + + require.True(t, state.Exited, "must complete program") + require.Equal(t, uint8(1), state.ExitCode, "exit with 1") + require.Contains(t, stdOutBuf.String(), c.expectedOutput) + require.NotContains(t, stdOutBuf.String(), "Passed test that should have failed") + require.Equal(t, "", stdErrBuf.String(), "should not print any errors") + }) + } } +func TestInstrumentedState_MultithreadedProgram(t *testing.T) { + if os.Getenv("SKIP_SLOW_TESTS") == "true" { + t.Skip("Skipping slow test because SKIP_SLOW_TESTS is enabled") + } + + t.Parallel() + cases := []struct { + name string + expectedOutput 
[]string + programName string + steps int + }{ + { + name: "general concurrency test", + expectedOutput: []string{ + "waitgroup result: 42", + "channels result: 1234", + "GC complete!", + }, + programName: "mt-general", + steps: 5_000_000, + }, + { + name: "atomic test", + expectedOutput: []string{ + "Atomic tests passed", + }, + programName: "mt-atomic", + steps: 350_000_000, + }, + { + name: "waitgroup test", + expectedOutput: []string{ + "WaitGroup tests passed", + }, + programName: "mt-wg", + steps: 15_000_000, + }, + { + name: "mutex test", + expectedOutput: []string{ + "Mutex test passed", + }, + programName: "mt-mutex", + steps: 5_000_000, + }, + { + name: "cond test", + expectedOutput: []string{ + "Cond test passed", + }, + programName: "mt-cond", + steps: 5_000_000, + }, + { + name: "rwmutex test", + expectedOutput: []string{ + "RWMutex test passed", + }, + programName: "mt-rwmutex", + steps: 5_000_000, + }, + { + name: "once test", + expectedOutput: []string{ + "Once test passed", + }, + programName: "mt-once", + steps: 5_000_000, + }, + { + name: "oncefunc test", + expectedOutput: []string{ + "OnceFunc tests passed", + }, + programName: "mt-oncefunc", + steps: 15_000_000, + }, + { + name: "map test", + expectedOutput: []string{ + "Map test passed", + }, + programName: "mt-map", + steps: 100_000_000, + }, + { + name: "pool test", + expectedOutput: []string{ + "Pool test passed", + }, + programName: "mt-pool", + steps: 50_000_000, + }, + { + name: "value test", + expectedOutput: []string{ + "Value tests passed", + }, + programName: "mt-value", + steps: 3_000_000, + }, + } + + for _, test := range cases { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + state, meta := testutil.LoadELFProgram(t, testutil.ProgramPath(test.programName), CreateInitialState, false) + oracle := testutil.StaticOracle(t, []byte{}) + + var stdOutBuf, stdErrBuf bytes.Buffer + us := NewInstrumentedState(state, oracle, io.MultiWriter(&stdOutBuf, os.Stdout), 
io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), meta) + + for i := 0; i < test.steps; i++ { + if us.GetState().GetExited() { + break + } + _, err := us.Step(false) + require.NoError(t, err) + } + t.Logf("Completed in %d steps", state.Step) + + require.True(t, state.Exited, "must complete program") + require.Equal(t, uint8(0), state.ExitCode, "exit with 0") + for _, expected := range test.expectedOutput { + require.Contains(t, stdOutBuf.String(), expected) + } + require.Equal(t, "", stdErrBuf.String(), "should not print any errors") + }) + } +} func TestInstrumentedState_Alloc(t *testing.T) { if os.Getenv("SKIP_SLOW_TESTS") == "true" { t.Skip("Skipping slow test because SKIP_SLOW_TESTS is enabled") diff --git a/cannon/scripts/build-legacy-cannons.sh b/cannon/scripts/build-legacy-cannons.sh index 62b54383984..3df0a43c31e 100755 --- a/cannon/scripts/build-legacy-cannons.sh +++ b/cannon/scripts/build-legacy-cannons.sh @@ -3,7 +3,7 @@ set -euo pipefail SCRIPTS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # This script builds a version of the cannon executable that includes support for both current and legacy state versions. -# Each cannon release is built +# Each cannon release is built separately. TMP_DIR=$(mktemp -d) function cleanup() { diff --git a/cannon/testdata/example/mt-atomic/atomic_test_copy.go b/cannon/testdata/example/mt-atomic/atomic_test_copy.go new file mode 100644 index 00000000000..e0cd1ebd69f --- /dev/null +++ b/cannon/testdata/example/mt-atomic/atomic_test_copy.go @@ -0,0 +1,2567 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/atomic/atomic_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "fmt" + "reflect" + "runtime" + "runtime/debug" + "strings" + . "sync/atomic" + "testing" + "unsafe" + + "utils/testutil" +) + +// Tests of correct behavior, without contention. +// (Does the function work as advertised?) +// +// Test that the Add functions add correctly. +// Test that the CompareAndSwap functions actually +// do the comparison and the swap correctly. 
+// +// The loop over power-of-two values is meant to +// ensure that the operations apply to the full word size. +// The struct fields x.before and x.after check that the +// operations do not extend past the full word size. + +const ( + magic32 = 0xdedbeef + magic64 = 0xdeddeadbeefbeef +) + +func TestSwapInt32(t *testutil.TestRunner) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + var j int32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := SwapInt32(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestSwapInt32Method(t *testutil.TestRunner) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + var j int32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := x.i.Swap(delta) + if x.i.Load() != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + j = delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestSwapUint32(t *testutil.TestRunner) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := SwapUint32(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestSwapUint32Method(t *testutil.TestRunner) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j 
uint32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := x.i.Swap(delta) + if x.i.Load() != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + j = delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestSwapInt64(t *testutil.TestRunner) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + var j int64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := SwapInt64(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestSwapInt64Method(t *testutil.TestRunner) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + var j int64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := x.i.Swap(delta) + if x.i.Load() != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + j = delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestSwapUint64(t *testutil.TestRunner) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + var j uint64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := SwapUint64(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func 
TestSwapUint64Method(t *testutil.TestRunner) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + var j uint64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := x.i.Swap(delta) + if x.i.Load() != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + j = delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestSwapUintptr(t *testutil.TestRunner) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := SwapUintptr(&x.i, delta) + if x.i != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + j = delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestSwapUintptrMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := x.i.Swap(delta) + if x.i.Load() != delta || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + j = delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +var global [1024]byte + +func testPointers() []unsafe.Pointer { + var pointers []unsafe.Pointer + // globals + for i := 0; i < 10; i++ { + pointers = append(pointers, unsafe.Pointer(&global[1< delta; delta += delta { + k := AddInt32(&x.i, delta) + j += delta + if x.i != j || k != j { + 
t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAddInt32Method(t *testutil.TestRunner) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + var j int32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := x.i.Add(delta) + j += delta + if x.i.Load() != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAddUint32(t *testutil.TestRunner) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := AddUint32(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAddUint32Method(t *testutil.TestRunner) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + var j uint32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := x.i.Add(delta) + j += delta + if x.i.Load() != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestAddInt64(t *testutil.TestRunner) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + var j int64 + for delta := int64(1); delta+delta > delta; delta += delta { + 
k := AddInt64(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAddInt64Method(t *testutil.TestRunner) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + var j int64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := x.i.Add(delta) + j += delta + if x.i.Load() != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAddUint64(t *testutil.TestRunner) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + var j uint64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := AddUint64(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAddUint64Method(t *testutil.TestRunner) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + var j uint64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := x.i.Add(delta) + j += delta + if x.i.Load() != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestAddUintptr(t *testutil.TestRunner) { + var x struct { + before uintptr + i uintptr + 
after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := AddUintptr(&x.i, delta) + j += delta + if x.i != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestAddUintptrMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + var j uintptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := x.i.Add(delta) + j += delta + if x.i.Load() != j || k != j { + t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i.Load(), j, k) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestCompareAndSwapInt32(t *testutil.TestRunner) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + for val := int32(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapInt32(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapInt32(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestCompareAndSwapInt32Method(t *testutil.TestRunner) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + 
for val := int32(1); val+val > val; val += val { + x.i.Store(val) + if !x.i.CompareAndSwap(val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + x.i.Store(val + 1) + if x.i.CompareAndSwap(val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestCompareAndSwapUint32(t *testutil.TestRunner) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + for val := uint32(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapUint32(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapUint32(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestCompareAndSwapUint32Method(t *testutil.TestRunner) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + for val := uint32(1); val+val > val; val += val { + x.i.Store(val) + if !x.i.CompareAndSwap(val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + x.i.Store(val + 1) + if x.i.CompareAndSwap(val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i.Load() 
!= val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestCompareAndSwapInt64(t *testutil.TestRunner) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + for val := int64(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapInt64(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapInt64(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestCompareAndSwapInt64Method(t *testutil.TestRunner) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + for val := int64(1); val+val > val; val += val { + x.i.Store(val) + if !x.i.CompareAndSwap(val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + x.i.Store(val + 1) + if x.i.CompareAndSwap(val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func testCompareAndSwapUint64(t testing.TB, cas func(*uint64, uint64, uint64) bool) { + var x struct { + before uint64 + i 
uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + for val := uint64(1); val+val > val; val += val { + x.i = val + if !cas(&x.i, val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if cas(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestCompareAndSwapUint64(t *testutil.TestRunner) { + testCompareAndSwapUint64(t, CompareAndSwapUint64) +} + +func TestCompareAndSwapUint64Method(t *testutil.TestRunner) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + for val := uint64(1); val+val > val; val += val { + x.i.Store(val) + if !x.i.CompareAndSwap(val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + x.i.Store(val + 1) + if x.i.CompareAndSwap(val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestCompareAndSwapUintptr(t *testutil.TestRunner) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for val := uintptr(1); val+val > val; val += val { + x.i = val + if !CompareAndSwapUintptr(&x.i, val, val+1) { + t.Fatalf("should have swapped 
%#x %#x", val, val+1) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + x.i = val + 1 + if CompareAndSwapUintptr(&x.i, val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestCompareAndSwapUintptrMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for val := uintptr(1); val+val > val; val += val { + x.i.Store(val) + if !x.i.CompareAndSwap(val, val+1) { + t.Fatalf("should have swapped %#x %#x", val, val+1) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + x.i.Store(val + 1) + if x.i.CompareAndSwap(val, val+2) { + t.Fatalf("should not have swapped %#x %#x", val, val+2) + } + if x.i.Load() != val+1 { + t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i.Load(), val+1) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uintptr(magicptr), uintptr(magicptr)) + } +} + +func TestCompareAndSwapPointer(t *testutil.TestRunner) { + var x struct { + before uintptr + i unsafe.Pointer + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + q := unsafe.Pointer(new(byte)) + for _, p := range testPointers() { + x.i = p + if !CompareAndSwapPointer(&x.i, p, q) { + t.Fatalf("should have swapped %p %p", p, q) + } + if x.i != q { + t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i, q) + } + if CompareAndSwapPointer(&x.i, p, nil) { + t.Fatalf("should not have swapped %p nil", p) + } + if x.i != q { + t.Fatalf("wrong x.i after swap: 
x.i=%p want %p", x.i, q) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestCompareAndSwapPointerMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Pointer[byte] + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + q := new(byte) + for _, p := range testPointers() { + p := (*byte)(p) + x.i.Store(p) + if !x.i.CompareAndSwap(p, q) { + t.Fatalf("should have swapped %p %p", p, q) + } + if x.i.Load() != q { + t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i.Load(), q) + } + if x.i.CompareAndSwap(p, nil) { + t.Fatalf("should not have swapped %p nil", p) + } + if x.i.Load() != q { + t.Fatalf("wrong x.i after swap: x.i=%p want %p", x.i.Load(), q) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestLoadInt32(t *testutil.TestRunner) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + for delta := int32(1); delta+delta > delta; delta += delta { + k := LoadInt32(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestLoadInt32Method(t *testutil.TestRunner) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + want := int32(0) + for delta := int32(1); delta+delta > delta; delta += delta { + k := x.i.Load() + if k != want { + t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want) + } + x.i.Store(k + delta) + want = k + delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } 
+} + +func TestLoadUint32(t *testutil.TestRunner) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + for delta := uint32(1); delta+delta > delta; delta += delta { + k := LoadUint32(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestLoadUint32Method(t *testutil.TestRunner) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + want := uint32(0) + for delta := uint32(1); delta+delta > delta; delta += delta { + k := x.i.Load() + if k != want { + t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want) + } + x.i.Store(k + delta) + want = k + delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestLoadInt64(t *testutil.TestRunner) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + for delta := int64(1); delta+delta > delta; delta += delta { + k := LoadInt64(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestLoadInt64Method(t *testutil.TestRunner) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + want := int64(0) + for delta := int64(1); delta+delta > delta; delta += delta { + k := x.i.Load() + if k != want { + t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want) + } + x.i.Store(k + delta) + want = k + delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: 
%#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestLoadUint64(t *testutil.TestRunner) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + for delta := uint64(1); delta+delta > delta; delta += delta { + k := LoadUint64(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestLoadUint64Method(t *testutil.TestRunner) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + want := uint64(0) + for delta := uint64(1); delta+delta > delta; delta += delta { + k := x.i.Load() + if k != want { + t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want) + } + x.i.Store(k + delta) + want = k + delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestLoadUintptr(t *testutil.TestRunner) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := LoadUintptr(&x.i) + if k != x.i { + t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k) + } + x.i += delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestLoadUintptrMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + want := uintptr(0) + for delta := uintptr(1); delta+delta > delta; delta += delta { + k := x.i.Load() + if k != 
want { + t.Fatalf("delta=%d i=%d k=%d want=%d", delta, x.i.Load(), k, want) + } + x.i.Store(k + delta) + want = k + delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestLoadPointer(t *testutil.TestRunner) { + var x struct { + before uintptr + i unsafe.Pointer + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for _, p := range testPointers() { + x.i = p + k := LoadPointer(&x.i) + if k != p { + t.Fatalf("p=%x k=%x", p, k) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestLoadPointerMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Pointer[byte] + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for _, p := range testPointers() { + p := (*byte)(p) + x.i.Store(p) + k := x.i.Load() + if k != p { + t.Fatalf("p=%x k=%x", p, k) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestStoreInt32(t *testutil.TestRunner) { + var x struct { + before int32 + i int32 + after int32 + } + x.before = magic32 + x.after = magic32 + v := int32(0) + for delta := int32(1); delta+delta > delta; delta += delta { + StoreInt32(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestStoreInt32Method(t *testutil.TestRunner) { + var x struct { + before int32 + i Int32 + after int32 + } + x.before = magic32 + x.after = magic32 + v := int32(0) + for delta := int32(1); delta+delta > delta; delta += delta { + x.i.Store(v) + 
if x.i.Load() != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v) + } + v += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestStoreUint32(t *testutil.TestRunner) { + var x struct { + before uint32 + i uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + v := uint32(0) + for delta := uint32(1); delta+delta > delta; delta += delta { + StoreUint32(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestStoreUint32Method(t *testutil.TestRunner) { + var x struct { + before uint32 + i Uint32 + after uint32 + } + x.before = magic32 + x.after = magic32 + v := uint32(0) + for delta := uint32(1); delta+delta > delta; delta += delta { + x.i.Store(v) + if x.i.Load() != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v) + } + v += delta + } + if x.before != magic32 || x.after != magic32 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32) + } +} + +func TestStoreInt64(t *testutil.TestRunner) { + var x struct { + before int64 + i int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + v := int64(0) + for delta := int64(1); delta+delta > delta; delta += delta { + StoreInt64(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestStoreInt64Method(t *testutil.TestRunner) { + var x struct { + before int64 + i Int64 + after int64 + } + magic64 := int64(magic64) + x.before = magic64 + x.after = magic64 + v := int64(0) + for delta := int64(1); delta+delta > delta; delta += delta { + 
x.i.Store(v) + if x.i.Load() != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v) + } + v += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestStoreUint64(t *testutil.TestRunner) { + var x struct { + before uint64 + i uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + v := uint64(0) + for delta := uint64(1); delta+delta > delta; delta += delta { + StoreUint64(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestStoreUint64Method(t *testutil.TestRunner) { + var x struct { + before uint64 + i Uint64 + after uint64 + } + magic64 := uint64(magic64) + x.before = magic64 + x.after = magic64 + v := uint64(0) + for delta := uint64(1); delta+delta > delta; delta += delta { + x.i.Store(v) + if x.i.Load() != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v) + } + v += delta + } + if x.before != magic64 || x.after != magic64 { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic64, magic64) + } +} + +func TestStoreUintptr(t *testutil.TestRunner) { + var x struct { + before uintptr + i uintptr + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + v := uintptr(0) + for delta := uintptr(1); delta+delta > delta; delta += delta { + StoreUintptr(&x.i, v) + if x.i != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v) + } + v += delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestStoreUintptrMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Uintptr + after uintptr + } + var m uint64 = magic64 + 
magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + v := uintptr(0) + for delta := uintptr(1); delta+delta > delta; delta += delta { + x.i.Store(v) + if x.i.Load() != v { + t.Fatalf("delta=%d i=%d v=%d", delta, x.i.Load(), v) + } + v += delta + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestStorePointer(t *testutil.TestRunner) { + var x struct { + before uintptr + i unsafe.Pointer + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for _, p := range testPointers() { + StorePointer(&x.i, p) + if x.i != p { + t.Fatalf("x.i=%p p=%p", x.i, p) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +func TestStorePointerMethod(t *testutil.TestRunner) { + var x struct { + before uintptr + i Pointer[byte] + after uintptr + } + var m uint64 = magic64 + magicptr := uintptr(m) + x.before = magicptr + x.after = magicptr + for _, p := range testPointers() { + p := (*byte)(p) + x.i.Store(p) + if x.i.Load() != p { + t.Fatalf("x.i=%p p=%p", x.i.Load(), p) + } + } + if x.before != magicptr || x.after != magicptr { + t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr) + } +} + +// Tests of correct behavior, with contention. +// (Is the function atomic?) +// +// For each function, we write a "hammer" function that repeatedly +// uses the atomic operation to add 1 to a value. After running +// multiple hammers in parallel, check that we end with the correct +// total. +// Swap can't add 1, so it uses a different scheme. +// The functions repeatedly generate a pseudo-random number such that +// low bits are equal to high bits, swap, check that the old value +// has low and high bits equal. 
+ +var hammer32 = map[string]func(*uint32, int){ + "SwapInt32": hammerSwapInt32, + "SwapUint32": hammerSwapUint32, + "SwapUintptr": hammerSwapUintptr32, + "AddInt32": hammerAddInt32, + "AddUint32": hammerAddUint32, + "AddUintptr": hammerAddUintptr32, + "CompareAndSwapInt32": hammerCompareAndSwapInt32, + "CompareAndSwapUint32": hammerCompareAndSwapUint32, + "CompareAndSwapUintptr": hammerCompareAndSwapUintptr32, + + "SwapInt32Method": hammerSwapInt32Method, + "SwapUint32Method": hammerSwapUint32Method, + "SwapUintptrMethod": hammerSwapUintptr32Method, + "AddInt32Method": hammerAddInt32Method, + "AddUint32Method": hammerAddUint32Method, + "AddUintptrMethod": hammerAddUintptr32Method, + "CompareAndSwapInt32Method": hammerCompareAndSwapInt32Method, + "CompareAndSwapUint32Method": hammerCompareAndSwapUint32Method, + "CompareAndSwapUintptrMethod": hammerCompareAndSwapUintptr32Method, +} + +func init() { + var v uint64 = 1 << 50 + if uintptr(v) != 0 { + // 64-bit system; clear uintptr tests + delete(hammer32, "SwapUintptr") + delete(hammer32, "AddUintptr") + delete(hammer32, "CompareAndSwapUintptr") + delete(hammer32, "SwapUintptrMethod") + delete(hammer32, "AddUintptrMethod") + delete(hammer32, "CompareAndSwapUintptrMethod") + } +} + +func hammerSwapInt32(uaddr *uint32, count int) { + addr := (*int32)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16 + old := uint32(SwapInt32(addr, int32(new))) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old)) + } + } +} + +func hammerSwapInt32Method(uaddr *uint32, count int) { + addr := (*Int32)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16 + old := uint32(addr.Swap(int32(new))) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old)) + } + } +} + +func 
hammerSwapUint32(addr *uint32, count int) { + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16 + old := SwapUint32(addr, new) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old)) + } + } +} + +func hammerSwapUint32Method(uaddr *uint32, count int) { + addr := (*Uint32)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16 + old := addr.Swap(new) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old)) + } + } +} + +func hammerSwapUintptr32(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16 + old := SwapUintptr(addr, new) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("SwapUintptr is not atomic: %#08x", old)) + } + } +} + +func hammerSwapUintptr32Method(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. 
+ addr := (*Uintptr)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16 + old := addr.Swap(new) + if old>>16 != old<<16>>16 { + panic(fmt.Sprintf("Uintptr.Swap is not atomic: %#08x", old)) + } + } +} + +func hammerAddInt32(uaddr *uint32, count int) { + addr := (*int32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddInt32(addr, 1) + } +} + +func hammerAddInt32Method(uaddr *uint32, count int) { + addr := (*Int32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerAddUint32(addr *uint32, count int) { + for i := 0; i < count; i++ { + AddUint32(addr, 1) + } +} + +func hammerAddUint32Method(uaddr *uint32, count int) { + addr := (*Uint32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerAddUintptr32(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddUintptr(addr, 1) + } +} + +func hammerAddUintptr32Method(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. 
+ addr := (*Uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerCompareAndSwapInt32(uaddr *uint32, count int) { + addr := (*int32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadInt32(addr) + if CompareAndSwapInt32(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapInt32Method(uaddr *uint32, count int) { + addr := (*Int32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUint32(addr *uint32, count int) { + for i := 0; i < count; i++ { + for { + v := LoadUint32(addr) + if CompareAndSwapUint32(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUint32Method(uaddr *uint32, count int) { + addr := (*Uint32)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUintptr32(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadUintptr(addr) + if CompareAndSwapUintptr(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUintptr32Method(uaddr *uint32, count int) { + // only safe when uintptr is 32-bit. + // not called on 64-bit systems. 
+ addr := (*Uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func TestHammer32(t *testutil.TestRunner) { + const p = 4 + n := 100000 + if short { + n = 1000 + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p)) + + for name, testf := range hammer32 { + c := make(chan int) + var val uint32 + for i := 0; i < p; i++ { + go func() { + defer func() { + if err := recover(); err != nil { + t.Error(err.(string)) + } + c <- 1 + }() + testf(&val, n) + }() + } + for i := 0; i < p; i++ { + <-c + } + if !strings.HasPrefix(name, "Swap") && val != uint32(n)*p { + t.Fatalf("%s: val=%d want %d", name, val, n*p) + } + } +} + +var hammer64 = map[string]func(*uint64, int){ + "SwapInt64": hammerSwapInt64, + "SwapUint64": hammerSwapUint64, + "SwapUintptr": hammerSwapUintptr64, + "AddInt64": hammerAddInt64, + "AddUint64": hammerAddUint64, + "AddUintptr": hammerAddUintptr64, + "CompareAndSwapInt64": hammerCompareAndSwapInt64, + "CompareAndSwapUint64": hammerCompareAndSwapUint64, + "CompareAndSwapUintptr": hammerCompareAndSwapUintptr64, + + "SwapInt64Method": hammerSwapInt64Method, + "SwapUint64Method": hammerSwapUint64Method, + "SwapUintptrMethod": hammerSwapUintptr64Method, + "AddInt64Method": hammerAddInt64Method, + "AddUint64Method": hammerAddUint64Method, + "AddUintptrMethod": hammerAddUintptr64Method, + "CompareAndSwapInt64Method": hammerCompareAndSwapInt64Method, + "CompareAndSwapUint64Method": hammerCompareAndSwapUint64Method, + "CompareAndSwapUintptrMethod": hammerCompareAndSwapUintptr64Method, +} + +func init() { + var v uint64 = 1 << 50 + if uintptr(v) == 0 { + // 32-bit system; clear uintptr tests + delete(hammer64, "SwapUintptr") + delete(hammer64, "SwapUintptrMethod") + delete(hammer64, "AddUintptr") + delete(hammer64, "AddUintptrMethod") + delete(hammer64, "CompareAndSwapUintptr") + delete(hammer64, "CompareAndSwapUintptrMethod") + } +} + +func hammerSwapInt64(uaddr *uint64, 
count int) { + addr := (*int64)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32 + old := uint64(SwapInt64(addr, int64(new))) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old)) + } + } +} + +func hammerSwapInt64Method(uaddr *uint64, count int) { + addr := (*Int64)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32 + old := uint64(addr.Swap(int64(new))) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old)) + } + } +} + +func hammerSwapUint64(addr *uint64, count int) { + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32 + old := SwapUint64(addr, new) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old)) + } + } +} + +func hammerSwapUint64Method(uaddr *uint64, count int) { + addr := (*Uint64)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32 + old := addr.Swap(new) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old)) + } + } +} + +const arch32 = unsafe.Sizeof(uintptr(0)) == 4 + +func hammerSwapUintptr64(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. + if !arch32 { + addr := (*uintptr)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32 + old := SwapUintptr(addr, new) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old)) + } + } + } +} + +func hammerSwapUintptr64Method(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. 
+ // not called on 32-bit systems. + if !arch32 { + addr := (*Uintptr)(unsafe.Pointer(uaddr)) + seed := int(uintptr(unsafe.Pointer(&count))) + for i := 0; i < count; i++ { + new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32 + old := addr.Swap(new) + if old>>32 != old<<32>>32 { + panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old)) + } + } + } +} + +func hammerAddInt64(uaddr *uint64, count int) { + addr := (*int64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddInt64(addr, 1) + } +} + +func hammerAddInt64Method(uaddr *uint64, count int) { + addr := (*Int64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerAddUint64(addr *uint64, count int) { + for i := 0; i < count; i++ { + AddUint64(addr, 1) + } +} + +func hammerAddUint64Method(uaddr *uint64, count int) { + addr := (*Uint64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerAddUintptr64(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + AddUintptr(addr, 1) + } +} + +func hammerAddUintptr64Method(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. 
+ addr := (*Uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + addr.Add(1) + } +} + +func hammerCompareAndSwapInt64(uaddr *uint64, count int) { + addr := (*int64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadInt64(addr) + if CompareAndSwapInt64(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapInt64Method(uaddr *uint64, count int) { + addr := (*Int64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUint64(addr *uint64, count int) { + for i := 0; i < count; i++ { + for { + v := LoadUint64(addr) + if CompareAndSwapUint64(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUint64Method(uaddr *uint64, count int) { + addr := (*Uint64)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUintptr64(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. + addr := (*uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := LoadUintptr(addr) + if CompareAndSwapUintptr(addr, v, v+1) { + break + } + } + } +} + +func hammerCompareAndSwapUintptr64Method(uaddr *uint64, count int) { + // only safe when uintptr is 64-bit. + // not called on 32-bit systems. 
+ addr := (*Uintptr)(unsafe.Pointer(uaddr)) + for i := 0; i < count; i++ { + for { + v := addr.Load() + if addr.CompareAndSwap(v, v+1) { + break + } + } + } +} + +func TestHammer64(t *testutil.TestRunner) { + const p = 4 + n := 100000 + if short { + n = 1000 + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p)) + + for name, testf := range hammer64 { + c := make(chan int) + var val uint64 + for i := 0; i < p; i++ { + go func() { + defer func() { + if err := recover(); err != nil { + t.Error(err.(string)) + } + c <- 1 + }() + testf(&val, n) + }() + } + for i := 0; i < p; i++ { + <-c + } + if !strings.HasPrefix(name, "Swap") && val != uint64(n)*p { + t.Fatalf("%s: val=%d want %d", name, val, n*p) + } + } +} + +func hammerStoreLoadInt32(t testing.TB, paddr unsafe.Pointer) { + addr := (*int32)(paddr) + v := LoadInt32(addr) + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Int32: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + StoreInt32(addr, new) +} + +func hammerStoreLoadInt32Method(t testing.TB, paddr unsafe.Pointer) { + addr := (*int32)(paddr) + v := LoadInt32(addr) + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Int32: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + StoreInt32(addr, new) +} + +func hammerStoreLoadUint32(t testing.TB, paddr unsafe.Pointer) { + addr := (*uint32)(paddr) + v := LoadUint32(addr) + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Uint32: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + StoreUint32(addr, new) +} + +func hammerStoreLoadUint32Method(t testing.TB, paddr unsafe.Pointer) { + addr := (*Uint32)(paddr) + v := addr.Load() + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Uint32: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + addr.Store(new) +} + +func hammerStoreLoadInt64(t testing.TB, 
paddr unsafe.Pointer) { + addr := (*int64)(paddr) + v := LoadInt64(addr) + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Int64: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<32 + StoreInt64(addr, new) +} + +func hammerStoreLoadInt64Method(t testing.TB, paddr unsafe.Pointer) { + addr := (*Int64)(paddr) + v := addr.Load() + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Int64: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<32 + addr.Store(new) +} + +func hammerStoreLoadUint64(t testing.TB, paddr unsafe.Pointer) { + addr := (*uint64)(paddr) + v := LoadUint64(addr) + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Uint64: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<32 + StoreUint64(addr, new) +} + +func hammerStoreLoadUint64Method(t testing.TB, paddr unsafe.Pointer) { + addr := (*Uint64)(paddr) + v := addr.Load() + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Uint64: %#x != %#x", vlo, vhi) + } + new := v + 1 + 1<<32 + addr.Store(new) +} + +func hammerStoreLoadUintptr(t testing.TB, paddr unsafe.Pointer) { + addr := (*uintptr)(paddr) + v := LoadUintptr(addr) + new := v + if arch32 { + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Uintptr: %#x != %#x", vlo, vhi) + } + new = v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + } else { + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Uintptr: %#x != %#x", vlo, vhi) + } + inc := uint64(1 + 1<<32) + new = v + uintptr(inc) + } + StoreUintptr(addr, new) +} + +//go:nocheckptr +func hammerStoreLoadUintptrMethod(t testing.TB, paddr unsafe.Pointer) { + addr := (*Uintptr)(paddr) + v := addr.Load() + new := v + if arch32 { + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Uintptr: %#x != %#x", vlo, vhi) + } + new = v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + } else { + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + 
t.Fatalf("Uintptr: %#x != %#x", vlo, vhi) + } + inc := uint64(1 + 1<<32) + new = v + uintptr(inc) + } + addr.Store(new) +} + +// This code is just testing that LoadPointer/StorePointer operate +// atomically; it's not actually calculating pointers. +// +//go:nocheckptr +func hammerStoreLoadPointer(t testing.TB, paddr unsafe.Pointer) { + addr := (*unsafe.Pointer)(paddr) + v := uintptr(LoadPointer(addr)) + new := v + if arch32 { + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Pointer: %#x != %#x", vlo, vhi) + } + new = v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + } else { + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Pointer: %#x != %#x", vlo, vhi) + } + inc := uint64(1 + 1<<32) + new = v + uintptr(inc) + } + StorePointer(addr, unsafe.Pointer(new)) +} + +// This code is just testing that LoadPointer/StorePointer operate +// atomically; it's not actually calculating pointers. +// +//go:nocheckptr +func hammerStoreLoadPointerMethod(t testing.TB, paddr unsafe.Pointer) { + addr := (*Pointer[byte])(paddr) + v := uintptr(unsafe.Pointer(addr.Load())) + new := v + if arch32 { + vlo := v & ((1 << 16) - 1) + vhi := v >> 16 + if vlo != vhi { + t.Fatalf("Pointer: %#x != %#x", vlo, vhi) + } + new = v + 1 + 1<<16 + if vlo == 1e4 { + new = 0 + } + } else { + vlo := v & ((1 << 32) - 1) + vhi := v >> 32 + if vlo != vhi { + t.Fatalf("Pointer: %#x != %#x", vlo, vhi) + } + inc := uint64(1 + 1<<32) + new = v + uintptr(inc) + } + addr.Store((*byte)(unsafe.Pointer(new))) +} + +func TestHammerStoreLoad(t *testutil.TestRunner) { + tests := []func(testing.TB, unsafe.Pointer){ + hammerStoreLoadInt32, hammerStoreLoadUint32, + hammerStoreLoadUintptr, hammerStoreLoadPointer, + hammerStoreLoadInt32Method, hammerStoreLoadUint32Method, + hammerStoreLoadUintptrMethod, hammerStoreLoadPointerMethod, + hammerStoreLoadInt64, hammerStoreLoadUint64, + hammerStoreLoadInt64Method, hammerStoreLoadUint64Method, + } + n := int(1e6) + if short { + n = 
int(1e4) + } + const procs = 8 + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs)) + // Disable the GC because hammerStoreLoadPointer invokes + // write barriers on values that aren't real pointers. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + // Ensure any in-progress GC is finished. + runtime.GC() + for _, tt := range tests { + c := make(chan int) + var val uint64 + for p := 0; p < procs; p++ { + go func() { + for i := 0; i < n; i++ { + tt(t, unsafe.Pointer(&val)) + } + c <- 1 + }() + } + for p := 0; p < procs; p++ { + <-c + } + } +} + +func TestStoreLoadSeqCst32(t *testutil.TestRunner) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int32(1e3) + if short { + N = int32(1e2) + } + c := make(chan bool, 2) + X := [2]int32{} + ack := [2][3]int32{{-1, -1, -1}, {-1, -1, -1}} + for p := 0; p < 2; p++ { + go func(me int) { + he := 1 - me + for i := int32(1); i < N; i++ { + StoreInt32(&X[me], i) + my := LoadInt32(&X[he]) + StoreInt32(&ack[me][i%3], my) + for w := 1; LoadInt32(&ack[he][i%3]) == -1; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + his := LoadInt32(&ack[he][i%3]) + if (my != i && my != i-1) || (his != i && his != i-1) { + t.Errorf("invalid values: %d/%d (%d)", my, his, i) + break + } + if my != i && his != i { + t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i) + break + } + StoreInt32(&ack[me][(i-1)%3], -1) + } + c <- true + }(p) + } + <-c + <-c +} + +func TestStoreLoadSeqCst64(t *testutil.TestRunner) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int64(1e3) + if short { + N = int64(1e2) + } + c := make(chan bool, 2) + X := [2]int64{} + ack := [2][3]int64{{-1, -1, -1}, {-1, -1, -1}} + for p := 0; p < 2; p++ { + go func(me int) { + he := 1 - me + for i := int64(1); i < N; i++ { + 
StoreInt64(&X[me], i) + my := LoadInt64(&X[he]) + StoreInt64(&ack[me][i%3], my) + for w := 1; LoadInt64(&ack[he][i%3]) == -1; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + his := LoadInt64(&ack[he][i%3]) + if (my != i && my != i-1) || (his != i && his != i-1) { + t.Errorf("invalid values: %d/%d (%d)", my, his, i) + break + } + if my != i && his != i { + t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i) + break + } + StoreInt64(&ack[me][(i-1)%3], -1) + } + c <- true + }(p) + } + <-c + <-c +} + +func TestStoreLoadRelAcq32(t *testutil.TestRunner) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int32(1e3) + if short { + N = int32(1e2) + } + c := make(chan bool, 2) + type Data struct { + signal int32 + pad1 [128]int8 + data1 int32 + pad2 [128]int8 + data2 float32 + } + var X Data + for p := int32(0); p < 2; p++ { + go func(p int32) { + for i := int32(1); i < N; i++ { + if (i+p)%2 == 0 { + X.data1 = i + X.data2 = float32(i) + StoreInt32(&X.signal, i) + } else { + for w := 1; LoadInt32(&X.signal) != i; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + d1 := X.data1 + d2 := X.data2 + if d1 != i || d2 != float32(i) { + t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i) + break + } + } + } + c <- true + }(p) + } + <-c + <-c +} + +func TestStoreLoadRelAcq64(t *testutil.TestRunner) { + if runtime.NumCPU() == 1 { + t.Skipf("Skipping test on %v processor machine", runtime.NumCPU()) + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4)) + N := int64(1e3) + if short { + N = int64(1e2) + } + c := make(chan bool, 2) + type Data struct { + signal int64 + pad1 [128]int8 + data1 int64 + pad2 [128]int8 + data2 float64 + } + var X Data + for p := int64(0); p < 2; p++ { + go func(p int64) { + for i := int64(1); i < N; i++ { + if (i+p)%2 == 0 { + X.data1 = i + X.data2 = float64(i) + StoreInt64(&X.signal, i) + } else { + for w := 1; 
LoadInt64(&X.signal) != i; w++ { + if w%1000 == 0 { + runtime.Gosched() + } + } + d1 := X.data1 + d2 := X.data2 + if d1 != i || d2 != float64(i) { + t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i) + break + } + } + } + c <- true + }(p) + } + <-c + <-c +} + +func shouldPanic(t testing.TB, name string, f func()) { + defer func() { + // Check that all GC maps are sane. + runtime.GC() + + err := recover() + want := "unaligned 64-bit atomic operation" + if err == nil { + t.Errorf("%s did not panic", name) + } else if s, _ := err.(string); s != want { + t.Errorf("%s: wanted panic %q, got %q", name, want, err) + } + }() + f() +} + +func TestUnaligned64(t *testutil.TestRunner) { + // Unaligned 64-bit atomics on 32-bit systems are + // a continual source of pain. Test that on 32-bit systems they crash + // instead of failing silently. + if !arch32 { + t.Skip("test only runs on 32-bit systems") + } + + x := make([]uint32, 4) + p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned + + shouldPanic(t, "LoadUint64", func() { LoadUint64(p) }) + shouldPanic(t, "LoadUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Load() }) + shouldPanic(t, "StoreUint64", func() { StoreUint64(p, 1) }) + shouldPanic(t, "StoreUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Store(1) }) + shouldPanic(t, "CompareAndSwapUint64", func() { CompareAndSwapUint64(p, 1, 2) }) + shouldPanic(t, "CompareAndSwapUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).CompareAndSwap(1, 2) }) + shouldPanic(t, "AddUint64", func() { AddUint64(p, 3) }) + shouldPanic(t, "AddUint64Method", func() { (*Uint64)(unsafe.Pointer(p)).Add(3) }) +} + +func TestAutoAligned64(t *testutil.TestRunner) { + var signed struct { + _ uint32 + i Int64 + } + if o := reflect.TypeOf(&signed).Elem().Field(1).Offset; o != 8 { + t.Fatalf("Int64 offset = %d, want 8", o) + } + if p := reflect.ValueOf(&signed).Elem().Field(1).Addr().Pointer(); p&7 != 0 { + t.Fatalf("Int64 pointer = %#x, want 8-aligned", p) + } + + var unsigned struct { + _ 
uint32 + i Uint64 + } + if o := reflect.TypeOf(&unsigned).Elem().Field(1).Offset; o != 8 { + t.Fatalf("Uint64 offset = %d, want 8", o) + } + if p := reflect.ValueOf(&unsigned).Elem().Field(1).Addr().Pointer(); p&7 != 0 { + t.Fatalf("Int64 pointer = %#x, want 8-aligned", p) + } +} + +func TestNilDeref(t *testutil.TestRunner) { + funcs := [...]func(){ + func() { CompareAndSwapInt32(nil, 0, 0) }, + func() { (*Int32)(nil).CompareAndSwap(0, 0) }, + func() { CompareAndSwapInt64(nil, 0, 0) }, + func() { (*Int64)(nil).CompareAndSwap(0, 0) }, + func() { CompareAndSwapUint32(nil, 0, 0) }, + func() { (*Uint32)(nil).CompareAndSwap(0, 0) }, + func() { CompareAndSwapUint64(nil, 0, 0) }, + func() { (*Uint64)(nil).CompareAndSwap(0, 0) }, + func() { CompareAndSwapUintptr(nil, 0, 0) }, + func() { (*Uintptr)(nil).CompareAndSwap(0, 0) }, + func() { CompareAndSwapPointer(nil, nil, nil) }, + func() { (*Pointer[byte])(nil).CompareAndSwap(nil, nil) }, + func() { SwapInt32(nil, 0) }, + func() { (*Int32)(nil).Swap(0) }, + func() { SwapUint32(nil, 0) }, + func() { (*Uint32)(nil).Swap(0) }, + func() { SwapInt64(nil, 0) }, + func() { (*Int64)(nil).Swap(0) }, + func() { SwapUint64(nil, 0) }, + func() { (*Uint64)(nil).Swap(0) }, + func() { SwapUintptr(nil, 0) }, + func() { (*Uintptr)(nil).Swap(0) }, + func() { SwapPointer(nil, nil) }, + func() { (*Pointer[byte])(nil).Swap(nil) }, + func() { AddInt32(nil, 0) }, + func() { (*Int32)(nil).Add(0) }, + func() { AddUint32(nil, 0) }, + func() { (*Uint32)(nil).Add(0) }, + func() { AddInt64(nil, 0) }, + func() { (*Int64)(nil).Add(0) }, + func() { AddUint64(nil, 0) }, + func() { (*Uint64)(nil).Add(0) }, + func() { AddUintptr(nil, 0) }, + func() { (*Uintptr)(nil).Add(0) }, + func() { LoadInt32(nil) }, + func() { (*Int32)(nil).Load() }, + func() { LoadInt64(nil) }, + func() { (*Int64)(nil).Load() }, + func() { LoadUint32(nil) }, + func() { (*Uint32)(nil).Load() }, + func() { LoadUint64(nil) }, + func() { (*Uint64)(nil).Load() }, + func() { LoadUintptr(nil) 
}, + func() { (*Uintptr)(nil).Load() }, + func() { LoadPointer(nil) }, + func() { (*Pointer[byte])(nil).Load() }, + func() { StoreInt32(nil, 0) }, + func() { (*Int32)(nil).Store(0) }, + func() { StoreInt64(nil, 0) }, + func() { (*Int64)(nil).Store(0) }, + func() { StoreUint32(nil, 0) }, + func() { (*Uint32)(nil).Store(0) }, + func() { StoreUint64(nil, 0) }, + func() { (*Uint64)(nil).Store(0) }, + func() { StoreUintptr(nil, 0) }, + func() { (*Uintptr)(nil).Store(0) }, + func() { StorePointer(nil, nil) }, + func() { (*Pointer[byte])(nil).Store(nil) }, + } + for _, f := range funcs { + func() { + defer func() { + runtime.GC() + recover() + }() + f() + }() + } +} + +// Test that this compiles. +// When atomic.Pointer used _ [0]T, it did not. +type List struct { + Next Pointer[List] +} diff --git a/cannon/testdata/example/mt-atomic/go.mod b/cannon/testdata/example/mt-atomic/go.mod new file mode 100644 index 00000000000..042386cf702 --- /dev/null +++ b/cannon/testdata/example/mt-atomic/go.mod @@ -0,0 +1,8 @@ +module atomic + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-atomic/main.go b/cannon/testdata/example/mt-atomic/main.go new file mode 100644 index 00000000000..9d683c5bd59 --- /dev/null +++ b/cannon/testdata/example/mt-atomic/main.go @@ -0,0 +1,80 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestSwapInt32, "TestSwapInt32") + testutil.RunTest(TestSwapInt32Method, "TestSwapInt32Method") + testutil.RunTest(TestSwapUint32, "TestSwapUint32") + testutil.RunTest(TestSwapUint32Method, "TestSwapUint32Method") + testutil.RunTest(TestSwapInt64, "TestSwapInt64") + testutil.RunTest(TestSwapInt64Method, "TestSwapInt64Method") + testutil.RunTest(TestSwapUint64, "TestSwapUint64") + testutil.RunTest(TestSwapUint64Method, "TestSwapUint64Method") + testutil.RunTest(TestSwapUintptr, "TestSwapUintptr") + testutil.RunTest(TestSwapUintptrMethod, 
"TestSwapUintptrMethod") + testutil.RunTest(TestSwapPointer, "TestSwapPointer") + testutil.RunTest(TestSwapPointerMethod, "TestSwapPointerMethod") + testutil.RunTest(TestAddInt32, "TestAddInt32") + testutil.RunTest(TestAddInt32Method, "TestAddInt32Method") + testutil.RunTest(TestAddUint32, "TestAddUint32") + testutil.RunTest(TestAddUint32Method, "TestAddUint32Method") + testutil.RunTest(TestAddInt64, "TestAddInt64") + testutil.RunTest(TestAddInt64Method, "TestAddInt64Method") + testutil.RunTest(TestAddUint64, "TestAddUint64") + testutil.RunTest(TestAddUint64Method, "TestAddUint64Method") + testutil.RunTest(TestAddUintptr, "TestAddUintptr") + testutil.RunTest(TestAddUintptrMethod, "TestAddUintptrMethod") + testutil.RunTest(TestCompareAndSwapInt32, "TestCompareAndSwapInt32") + testutil.RunTest(TestCompareAndSwapInt32Method, "TestCompareAndSwapInt32Method") + testutil.RunTest(TestCompareAndSwapUint32, "TestCompareAndSwapUint32") + testutil.RunTest(TestCompareAndSwapUint32Method, "TestCompareAndSwapUint32Method") + testutil.RunTest(TestCompareAndSwapInt64, "TestCompareAndSwapInt64") + testutil.RunTest(TestCompareAndSwapInt64Method, "TestCompareAndSwapInt64Method") + testutil.RunTest(TestCompareAndSwapUint64, "TestCompareAndSwapUint64") + testutil.RunTest(TestCompareAndSwapUint64Method, "TestCompareAndSwapUint64Method") + testutil.RunTest(TestCompareAndSwapUintptr, "TestCompareAndSwapUintptr") + testutil.RunTest(TestCompareAndSwapUintptrMethod, "TestCompareAndSwapUintptrMethod") + testutil.RunTest(TestCompareAndSwapPointer, "TestCompareAndSwapPointer") + testutil.RunTest(TestCompareAndSwapPointerMethod, "TestCompareAndSwapPointerMethod") + testutil.RunTest(TestLoadInt32, "TestLoadInt32") + testutil.RunTest(TestLoadInt32Method, "TestLoadInt32Method") + testutil.RunTest(TestLoadUint32, "TestLoadUint32") + testutil.RunTest(TestLoadUint32Method, "TestLoadUint32Method") + testutil.RunTest(TestLoadInt64, "TestLoadInt64") + testutil.RunTest(TestLoadInt64Method, 
"TestLoadInt64Method") + testutil.RunTest(TestLoadUint64, "TestLoadUint64") + testutil.RunTest(TestLoadUint64Method, "TestLoadUint64Method") + testutil.RunTest(TestLoadUintptr, "TestLoadUintptr") + testutil.RunTest(TestLoadUintptrMethod, "TestLoadUintptrMethod") + testutil.RunTest(TestLoadPointer, "TestLoadPointer") + testutil.RunTest(TestLoadPointerMethod, "TestLoadPointerMethod") + testutil.RunTest(TestStoreInt32, "TestStoreInt32") + testutil.RunTest(TestStoreInt32Method, "TestStoreInt32Method") + testutil.RunTest(TestStoreUint32, "TestStoreUint32") + testutil.RunTest(TestStoreUint32Method, "TestStoreUint32Method") + testutil.RunTest(TestStoreInt64, "TestStoreInt64") + testutil.RunTest(TestStoreInt64Method, "TestStoreInt64Method") + testutil.RunTest(TestStoreUint64, "TestStoreUint64") + testutil.RunTest(TestStoreUint64Method, "TestStoreUint64Method") + testutil.RunTest(TestStoreUintptr, "TestStoreUintptr") + testutil.RunTest(TestStoreUintptrMethod, "TestStoreUintptrMethod") + testutil.RunTest(TestStorePointer, "TestStorePointer") + testutil.RunTest(TestStorePointerMethod, "TestStorePointerMethod") + testutil.RunTest(TestHammer32, "TestHammer32") + testutil.RunTest(TestHammer64, "TestHammer64") + testutil.RunTest(TestAutoAligned64, "TestAutoAligned64") + testutil.RunTest(TestNilDeref, "TestNilDeref") + testutil.RunTest(TestStoreLoadSeqCst32, "TestStoreLoadSeqCst32") + testutil.RunTest(TestStoreLoadSeqCst64, "TestStoreLoadSeqCst64") + testutil.RunTest(TestStoreLoadRelAcq32, "TestStoreLoadRelAcq32") + testutil.RunTest(TestStoreLoadRelAcq64, "TestStoreLoadRelAcq64") + testutil.RunTest(TestUnaligned64, "TestUnaligned64") + testutil.RunTest(TestHammerStoreLoad, "TestHammerStoreLoad") + + fmt.Println("Atomic tests passed") +} diff --git a/cannon/testdata/example/multithreaded/go.mod b/cannon/testdata/example/mt-cond/go.mod similarity index 58% rename from cannon/testdata/example/multithreaded/go.mod rename to cannon/testdata/example/mt-cond/go.mod index 
e1bdb77a9af..d6d1853d5af 100644 --- a/cannon/testdata/example/multithreaded/go.mod +++ b/cannon/testdata/example/mt-cond/go.mod @@ -1,4 +1,4 @@ -module multithreaded +module cond go 1.22 diff --git a/cannon/testdata/example/mt-cond/main.go b/cannon/testdata/example/mt-cond/main.go new file mode 100644 index 00000000000..2b584cec999 --- /dev/null +++ b/cannon/testdata/example/mt-cond/main.go @@ -0,0 +1,302 @@ +// Portions of this code are derived from code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/400433af3660905ecaceaf19ddad3e6c24b141df/src/sync/cond_test.go +// +// --- Original License Notice --- +// +// Copyright 2009 The Go Authors. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google LLC nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package main + +import ( + "fmt" + "os" + "reflect" + "runtime" + "sync" +) + +func main() { + TestCondSignal() + TestCondSignalGenerations() + TestCondBroadcast() + TestRace() + TestCondSignalStealing() + TestCondCopy() + + fmt.Println("Cond test passed") +} + +func TestCondSignal() { + var m sync.Mutex + c := sync.NewCond(&m) + n := 2 + running := make(chan bool, n) + awake := make(chan bool, n) + for i := 0; i < n; i++ { + go func() { + m.Lock() + running <- true + c.Wait() + awake <- true + m.Unlock() + }() + } + for i := 0; i < n; i++ { + <-running // Wait for everyone to run. 
+ } + for n > 0 { + select { + case <-awake: + _, _ = fmt.Fprintln(os.Stderr, "goroutine not asleep") + os.Exit(1) + default: + } + m.Lock() + c.Signal() + m.Unlock() + <-awake // Will deadlock if no goroutine wakes up + select { + case <-awake: + _, _ = fmt.Fprintln(os.Stderr, "too many goroutines awake") + os.Exit(1) + default: + } + n-- + } + c.Signal() +} + +func TestCondSignalGenerations() { + var m sync.Mutex + c := sync.NewCond(&m) + n := 100 + running := make(chan bool, n) + awake := make(chan int, n) + for i := 0; i < n; i++ { + go func(i int) { + m.Lock() + running <- true + c.Wait() + awake <- i + m.Unlock() + }(i) + if i > 0 { + a := <-awake + if a != i-1 { + _, _ = fmt.Fprintf(os.Stderr, "wrong goroutine woke up: want %d, got %d\n", i-1, a) + os.Exit(1) + } + } + <-running + m.Lock() + c.Signal() + m.Unlock() + } +} + +func TestCondBroadcast() { + var m sync.Mutex + c := sync.NewCond(&m) + n := 5 + running := make(chan int, n) + awake := make(chan int, n) + exit := false + for i := 0; i < n; i++ { + go func(g int) { + m.Lock() + for !exit { + running <- g + c.Wait() + awake <- g + } + m.Unlock() + }(i) + } + for i := 0; i < n; i++ { + for i := 0; i < n; i++ { + <-running // Will deadlock unless n are running. 
+ } + if i == n-1 { + m.Lock() + exit = true + m.Unlock() + } + select { + case <-awake: + _, _ = fmt.Fprintln(os.Stderr, "goroutine not asleep") + os.Exit(1) + default: + } + m.Lock() + c.Broadcast() + m.Unlock() + seen := make([]bool, n) + for i := 0; i < n; i++ { + g := <-awake + if seen[g] { + _, _ = fmt.Fprintln(os.Stderr, "goroutine woke up twice") + os.Exit(1) + } + seen[g] = true + } + } + select { + case <-running: + _, _ = fmt.Fprintln(os.Stderr, "goroutine still running") + os.Exit(1) + default: + } + c.Broadcast() +} + +func TestRace() { + x := 0 + c := sync.NewCond(&sync.Mutex{}) + done := make(chan bool) + go func() { + c.L.Lock() + x = 1 + c.Wait() + if x != 2 { + _, _ = fmt.Fprintln(os.Stderr, "want 2") + os.Exit(1) + } + x = 3 + c.Signal() + c.L.Unlock() + done <- true + }() + go func() { + c.L.Lock() + for { + if x == 1 { + x = 2 + c.Signal() + break + } + c.L.Unlock() + runtime.Gosched() + c.L.Lock() + } + c.L.Unlock() + done <- true + }() + go func() { + c.L.Lock() + for { + if x == 2 { + c.Wait() + if x != 3 { + _, _ = fmt.Fprintln(os.Stderr, "want 3") + os.Exit(1) + } + break + } + if x == 3 { + break + } + c.L.Unlock() + runtime.Gosched() + c.L.Lock() + } + c.L.Unlock() + done <- true + }() + <-done + <-done + <-done +} + +func TestCondSignalStealing() { + for iters := 0; iters < 5; iters++ { + var m sync.Mutex + cond := sync.NewCond(&m) + + // Start a waiter. + ch := make(chan struct{}) + go func() { + m.Lock() + ch <- struct{}{} + cond.Wait() + m.Unlock() + + ch <- struct{}{} + }() + + <-ch + m.Lock() + m.Unlock() + + // We know that the waiter is in the cond.Wait() call because we + // synchronized with it, then acquired/released the mutex it was + // holding when we synchronized. + // + // Start two goroutines that will race: one will broadcast on + // the cond var, the other will wait on it. + // + // The new waiter may or may not get notified, but the first one + // has to be notified. 
+ done := false + go func() { + cond.Broadcast() + }() + + go func() { + m.Lock() + for !done { + cond.Wait() + } + m.Unlock() + }() + + // Check that the first waiter does get signaled. + <-ch + + // Release the second waiter in case it didn't get the + // broadcast. + m.Lock() + done = true + m.Unlock() + cond.Broadcast() + } +} + +func TestCondCopy() { + defer func() { + err := recover() + if err == nil || err.(string) != "sync.Cond is copied" { + _, _ = fmt.Fprintf(os.Stderr, "got %v, expect sync.Cond is copied", err) + os.Exit(1) + } + }() + c := sync.Cond{L: &sync.Mutex{}} + c.Signal() + var c2 sync.Cond + reflect.ValueOf(&c2).Elem().Set(reflect.ValueOf(&c).Elem()) // c2 := c, hidden from vet + c2.Signal() +} diff --git a/cannon/testdata/example/mt-general/go.mod b/cannon/testdata/example/mt-general/go.mod new file mode 100644 index 00000000000..3a7bf3680f5 --- /dev/null +++ b/cannon/testdata/example/mt-general/go.mod @@ -0,0 +1,5 @@ +module mtgeneral + +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/example/multithreaded/main.go b/cannon/testdata/example/mt-general/main.go similarity index 100% rename from cannon/testdata/example/multithreaded/main.go rename to cannon/testdata/example/mt-general/main.go diff --git a/cannon/testdata/example/mt-map/go.mod b/cannon/testdata/example/mt-map/go.mod new file mode 100644 index 00000000000..7290b372361 --- /dev/null +++ b/cannon/testdata/example/mt-map/go.mod @@ -0,0 +1,8 @@ +module map + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-map/main.go b/cannon/testdata/example/mt-map/main.go new file mode 100644 index 00000000000..577a19a3ed8 --- /dev/null +++ b/cannon/testdata/example/mt-map/main.go @@ -0,0 +1,19 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestMapMatchesRWMutex, "TestMapMatchesRWMutex") + testutil.RunTest(TestMapMatchesDeepCopy, "TestMapMatchesDeepCopy") + 
testutil.RunTest(TestConcurrentRange, "TestConcurrentRange") + testutil.RunTest(TestIssue40999, "TestIssue40999") + testutil.RunTest(TestMapRangeNestedCall, "TestMapRangeNestedCall") + testutil.RunTest(TestCompareAndSwap_NonExistingKey, "TestCompareAndSwap_NonExistingKey") + testutil.RunTest(TestMapRangeNoAllocations, "TestMapRangeNoAllocations") + + fmt.Println("Map test passed") +} diff --git a/cannon/testdata/example/mt-map/map_reference_test_copy.go b/cannon/testdata/example/mt-map/map_reference_test_copy.go new file mode 100644 index 00000000000..3beeb1501c5 --- /dev/null +++ b/cannon/testdata/example/mt-map/map_reference_test_copy.go @@ -0,0 +1,299 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/map_reference_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package main + +import ( + "sync" + "sync/atomic" +) + +// This file contains reference map implementations for unit-tests. + +// mapInterface is the interface Map implements. +type mapInterface interface { + Load(any) (any, bool) + Store(key, value any) + LoadOrStore(key, value any) (actual any, loaded bool) + LoadAndDelete(key any) (value any, loaded bool) + Delete(any) + Swap(key, value any) (previous any, loaded bool) + CompareAndSwap(key, old, new any) (swapped bool) + CompareAndDelete(key, old any) (deleted bool) + Range(func(key, value any) (shouldContinue bool)) +} + +var ( + _ mapInterface = &RWMutexMap{} + _ mapInterface = &DeepCopyMap{} +) + +// RWMutexMap is an implementation of mapInterface using a sync.RWMutex. 
+type RWMutexMap struct { + mu sync.RWMutex + dirty map[any]any +} + +func (m *RWMutexMap) Load(key any) (value any, ok bool) { + m.mu.RLock() + value, ok = m.dirty[key] + m.mu.RUnlock() + return +} + +func (m *RWMutexMap) Store(key, value any) { + m.mu.Lock() + if m.dirty == nil { + m.dirty = make(map[any]any) + } + m.dirty[key] = value + m.mu.Unlock() +} + +func (m *RWMutexMap) LoadOrStore(key, value any) (actual any, loaded bool) { + m.mu.Lock() + actual, loaded = m.dirty[key] + if !loaded { + actual = value + if m.dirty == nil { + m.dirty = make(map[any]any) + } + m.dirty[key] = value + } + m.mu.Unlock() + return actual, loaded +} + +func (m *RWMutexMap) Swap(key, value any) (previous any, loaded bool) { + m.mu.Lock() + if m.dirty == nil { + m.dirty = make(map[any]any) + } + + previous, loaded = m.dirty[key] + m.dirty[key] = value + m.mu.Unlock() + return +} + +func (m *RWMutexMap) LoadAndDelete(key any) (value any, loaded bool) { + m.mu.Lock() + value, loaded = m.dirty[key] + if !loaded { + m.mu.Unlock() + return nil, false + } + delete(m.dirty, key) + m.mu.Unlock() + return value, loaded +} + +func (m *RWMutexMap) Delete(key any) { + m.mu.Lock() + delete(m.dirty, key) + m.mu.Unlock() +} + +func (m *RWMutexMap) CompareAndSwap(key, old, new any) (swapped bool) { + m.mu.Lock() + defer m.mu.Unlock() + if m.dirty == nil { + return false + } + + value, loaded := m.dirty[key] + if loaded && value == old { + m.dirty[key] = new + return true + } + return false +} + +func (m *RWMutexMap) CompareAndDelete(key, old any) (deleted bool) { + m.mu.Lock() + defer m.mu.Unlock() + if m.dirty == nil { + return false + } + + value, loaded := m.dirty[key] + if loaded && value == old { + delete(m.dirty, key) + return true + } + return false +} + +func (m *RWMutexMap) Range(f func(key, value any) (shouldContinue bool)) { + m.mu.RLock() + keys := make([]any, 0, len(m.dirty)) + for k := range m.dirty { + keys = append(keys, k) + } + m.mu.RUnlock() + + for _, k := range keys { + v, ok 
:= m.Load(k) + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +// DeepCopyMap is an implementation of mapInterface using a Mutex and +// atomic.Value. It makes deep copies of the map on every write to avoid +// acquiring the Mutex in Load. +type DeepCopyMap struct { + mu sync.Mutex + clean atomic.Value +} + +func (m *DeepCopyMap) Load(key any) (value any, ok bool) { + clean, _ := m.clean.Load().(map[any]any) + value, ok = clean[key] + return value, ok +} + +func (m *DeepCopyMap) Store(key, value any) { + m.mu.Lock() + dirty := m.dirty() + dirty[key] = value + m.clean.Store(dirty) + m.mu.Unlock() +} + +func (m *DeepCopyMap) LoadOrStore(key, value any) (actual any, loaded bool) { + clean, _ := m.clean.Load().(map[any]any) + actual, loaded = clean[key] + if loaded { + return actual, loaded + } + + m.mu.Lock() + // Reload clean in case it changed while we were waiting on m.mu. + clean, _ = m.clean.Load().(map[any]any) + actual, loaded = clean[key] + if !loaded { + dirty := m.dirty() + dirty[key] = value + actual = value + m.clean.Store(dirty) + } + m.mu.Unlock() + return actual, loaded +} + +func (m *DeepCopyMap) Swap(key, value any) (previous any, loaded bool) { + m.mu.Lock() + dirty := m.dirty() + previous, loaded = dirty[key] + dirty[key] = value + m.clean.Store(dirty) + m.mu.Unlock() + return +} + +func (m *DeepCopyMap) LoadAndDelete(key any) (value any, loaded bool) { + m.mu.Lock() + dirty := m.dirty() + value, loaded = dirty[key] + delete(dirty, key) + m.clean.Store(dirty) + m.mu.Unlock() + return +} + +func (m *DeepCopyMap) Delete(key any) { + m.mu.Lock() + dirty := m.dirty() + delete(dirty, key) + m.clean.Store(dirty) + m.mu.Unlock() +} + +func (m *DeepCopyMap) CompareAndSwap(key, old, new any) (swapped bool) { + clean, _ := m.clean.Load().(map[any]any) + if previous, ok := clean[key]; !ok || previous != old { + return false + } + + m.mu.Lock() + defer m.mu.Unlock() + dirty := m.dirty() + value, loaded := dirty[key] + if loaded && value == old { + 
dirty[key] = new + m.clean.Store(dirty) + return true + } + return false +} + +func (m *DeepCopyMap) CompareAndDelete(key, old any) (deleted bool) { + clean, _ := m.clean.Load().(map[any]any) + if previous, ok := clean[key]; !ok || previous != old { + return false + } + + m.mu.Lock() + defer m.mu.Unlock() + + dirty := m.dirty() + value, loaded := dirty[key] + if loaded && value == old { + delete(dirty, key) + m.clean.Store(dirty) + return true + } + return false +} + +func (m *DeepCopyMap) Range(f func(key, value any) (shouldContinue bool)) { + clean, _ := m.clean.Load().(map[any]any) + for k, v := range clean { + if !f(k, v) { + break + } + } +} + +func (m *DeepCopyMap) dirty() map[any]any { + clean, _ := m.clean.Load().(map[any]any) + dirty := make(map[any]any, len(clean)+1) + for k, v := range clean { + dirty[k] = v + } + return dirty +} diff --git a/cannon/testdata/example/mt-map/map_test_copy.go b/cannon/testdata/example/mt-map/map_test_copy.go new file mode 100644 index 00000000000..7b8806698f9 --- /dev/null +++ b/cannon/testdata/example/mt-map/map_test_copy.go @@ -0,0 +1,325 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/map_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package main + +import ( + "math/rand" + "reflect" + "runtime" + "sync" + "sync/atomic" + "testing" + "testing/quick" + + "utils/testutil" +) + +type mapOp string + +const ( + opLoad = mapOp("Load") + opStore = mapOp("Store") + opLoadOrStore = mapOp("LoadOrStore") + opLoadAndDelete = mapOp("LoadAndDelete") + opDelete = mapOp("Delete") + opSwap = mapOp("Swap") + opCompareAndSwap = mapOp("CompareAndSwap") + opCompareAndDelete = mapOp("CompareAndDelete") +) + +var mapOps = [...]mapOp{ + opLoad, + opStore, + opLoadOrStore, + opLoadAndDelete, + opDelete, + opSwap, + opCompareAndSwap, + opCompareAndDelete, +} + +// mapCall is a quick.Generator for calls on mapInterface. 
+type mapCall struct { + op mapOp + k, v any +} + +func (c mapCall) apply(m mapInterface) (any, bool) { + switch c.op { + case opLoad: + return m.Load(c.k) + case opStore: + m.Store(c.k, c.v) + return nil, false + case opLoadOrStore: + return m.LoadOrStore(c.k, c.v) + case opLoadAndDelete: + return m.LoadAndDelete(c.k) + case opDelete: + m.Delete(c.k) + return nil, false + case opSwap: + return m.Swap(c.k, c.v) + case opCompareAndSwap: + if m.CompareAndSwap(c.k, c.v, rand.Int()) { + m.Delete(c.k) + return c.v, true + } + return nil, false + case opCompareAndDelete: + if m.CompareAndDelete(c.k, c.v) { + if _, ok := m.Load(c.k); !ok { + return nil, true + } + } + return nil, false + default: + panic("invalid mapOp") + } +} + +type mapResult struct { + value any + ok bool +} + +func randValue(r *rand.Rand) any { + b := make([]byte, r.Intn(4)) + for i := range b { + b[i] = 'a' + byte(rand.Intn(26)) + } + return string(b) +} + +func (mapCall) Generate(r *rand.Rand, size int) reflect.Value { + c := mapCall{op: mapOps[rand.Intn(len(mapOps))], k: randValue(r)} + switch c.op { + case opStore, opLoadOrStore: + c.v = randValue(r) + } + return reflect.ValueOf(c) +} + +func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[any]any) { + for _, c := range calls { + v, ok := c.apply(m) + results = append(results, mapResult{v, ok}) + } + + final = make(map[any]any) + m.Range(func(k, v any) bool { + final[k] = v + return true + }) + + return results, final +} + +func applyMap(calls []mapCall) ([]mapResult, map[any]any) { + return applyCalls(new(sync.Map), calls) +} + +func applyRWMutexMap(calls []mapCall) ([]mapResult, map[any]any) { + return applyCalls(new(RWMutexMap), calls) +} + +func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[any]any) { + return applyCalls(new(DeepCopyMap), calls) +} + +func TestMapMatchesRWMutex(t *testutil.TestRunner) { + if err := quick.CheckEqual(applyMap, applyRWMutexMap, nil); err != nil { + t.Error(err) + } +} + +func 
TestMapMatchesDeepCopy(t *testutil.TestRunner) { + if err := quick.CheckEqual(applyMap, applyDeepCopyMap, nil); err != nil { + t.Error(err) + } +} + +func TestConcurrentRange(t *testutil.TestRunner) { + const mapSize = 1 << 10 + + m := new(sync.Map) + for n := int64(1); n <= mapSize; n++ { + m.Store(n, int64(n)) + } + + done := make(chan struct{}) + var wg sync.WaitGroup + defer func() { + close(done) + wg.Wait() + }() + for g := int64(runtime.GOMAXPROCS(0)); g > 0; g-- { + r := rand.New(rand.NewSource(g)) + wg.Add(1) + go func(g int64) { + defer wg.Done() + for i := int64(0); ; i++ { + select { + case <-done: + return + default: + } + for n := int64(1); n < mapSize; n++ { + if r.Int63n(mapSize) == 0 { + m.Store(n, n*i*g) + } else { + m.Load(n) + } + } + } + }(g) + } + + //iters := 1 << 10 + //if testing.Short() { + // iters = 16 + //} + iters := 16 + for n := iters; n > 0; n-- { + seen := make(map[int64]bool, mapSize) + + m.Range(func(ki, vi any) bool { + k, v := ki.(int64), vi.(int64) + if v%k != 0 { + t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v) + } + if seen[k] { + t.Fatalf("Range visited key %v twice", k) + } + seen[k] = true + return true + }) + + if len(seen) != mapSize { + t.Fatalf("Range visited %v elements of %v-element Map", len(seen), mapSize) + } + } +} + +func TestIssue40999(t *testutil.TestRunner) { + var m sync.Map + + // Since the miss-counting in missLocked (via Delete) + // compares the miss count with len(m.dirty), + // add an initial entry to bias len(m.dirty) above the miss count. + m.Store(nil, struct{}{}) + + var finalized uint32 + + // Set finalizers that count for collected keys. A non-zero count + // indicates that keys have not been leaked. 
+ for atomic.LoadUint32(&finalized) == 0 { + p := new(int) + runtime.SetFinalizer(p, func(*int) { + atomic.AddUint32(&finalized, 1) + }) + m.Store(p, struct{}{}) + m.Delete(p) + runtime.GC() + } +} + +func TestMapRangeNestedCall(t *testutil.TestRunner) { // Issue 46399 + var m sync.Map + for i, v := range [3]string{"hello", "world", "Go"} { + m.Store(i, v) + } + m.Range(func(key, value any) bool { + m.Range(func(key, value any) bool { + // We should be able to load the key offered in the Range callback, + // because there are no concurrent Delete involved in this tested map. + if v, ok := m.Load(key); !ok || !reflect.DeepEqual(v, value) { + t.Fatalf("Nested Range loads unexpected value, got %+v want %+v", v, value) + } + + // We didn't keep 42 and a value into the map before, if somehow we loaded + // a value from such a key, meaning there must be an internal bug regarding + // nested range in the Map. + if _, loaded := m.LoadOrStore(42, "dummy"); loaded { + t.Fatalf("Nested Range loads unexpected value, want store a new value") + } + + // Try to Store then LoadAndDelete the corresponding value with the key + // 42 to the Map. In this case, the key 42 and associated value should be + // removed from the Map. Therefore any future range won't observe key 42 + // as we checked in above. + val := "sync.Map" + m.Store(42, val) + if v, loaded := m.LoadAndDelete(42); !loaded || !reflect.DeepEqual(v, val) { + t.Fatalf("Nested Range loads unexpected value, got %v, want %v", v, val) + } + return true + }) + + // Remove key from Map on-the-fly. + m.Delete(key) + return true + }) + + // After a Range of Delete, all keys should be removed and any + // further Range won't invoke the callback. Hence length remains 0. 
+ length := 0 + m.Range(func(key, value any) bool { + length++ + return true + }) + + if length != 0 { + t.Fatalf("Unexpected sync.Map size, got %v want %v", length, 0) + } +} + +func TestCompareAndSwap_NonExistingKey(t *testutil.TestRunner) { + m := &sync.Map{} + if m.CompareAndSwap(m, nil, 42) { + // See https://go.dev/issue/51972#issuecomment-1126408637. + t.Fatalf("CompareAndSwap on a non-existing key succeeded") + } +} + +func TestMapRangeNoAllocations(t *testutil.TestRunner) { // Issue 62404 + var m sync.Map + allocs := testing.AllocsPerRun(10, func() { + m.Range(func(key, value any) bool { + return true + }) + }) + if allocs > 0 { + t.Errorf("AllocsPerRun of m.Range = %v; want 0", allocs) + } +} diff --git a/cannon/testdata/example/mt-mutex/go.mod b/cannon/testdata/example/mt-mutex/go.mod new file mode 100644 index 00000000000..3aceb4c8ceb --- /dev/null +++ b/cannon/testdata/example/mt-mutex/go.mod @@ -0,0 +1,8 @@ +module mutex + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-mutex/main.go b/cannon/testdata/example/mt-mutex/main.go new file mode 100644 index 00000000000..1a3b75c231b --- /dev/null +++ b/cannon/testdata/example/mt-mutex/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestSemaphore, "TestSemaphore") + testutil.RunTest(TestMutex, "TestMutex") + testutil.RunTest(TestMutexFairness, "TestMutexFairness") + + fmt.Println("Mutex test passed") +} diff --git a/cannon/testdata/example/mt-mutex/mutex_test_copy.go b/cannon/testdata/example/mt-mutex/mutex_test_copy.go new file mode 100644 index 00000000000..d3ed9343c3d --- /dev/null +++ b/cannon/testdata/example/mt-mutex/mutex_test_copy.go @@ -0,0 +1,135 @@ +// This file is based on code written by The Go Authors. 
+// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/mutex_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "runtime" + . 
"sync" + "time" + + "utils/testutil" +) + +func HammerSemaphore(s *uint32, loops int, cdone chan bool) { + for i := 0; i < loops; i++ { + Runtime_Semacquire(s) + Runtime_Semrelease(s, false, 0) + } + cdone <- true +} + +func TestSemaphore(t *testutil.TestRunner) { + s := new(uint32) + *s = 1 + c := make(chan bool) + for i := 0; i < 10; i++ { + go HammerSemaphore(s, 1000, c) + } + for i := 0; i < 10; i++ { + <-c + } +} + +func HammerMutex(m *Mutex, loops int, cdone chan bool) { + for i := 0; i < loops; i++ { + if i%3 == 0 { + if m.TryLock() { + m.Unlock() + } + continue + } + m.Lock() + m.Unlock() + } + cdone <- true +} + +func TestMutex(t *testutil.TestRunner) { + if n := runtime.SetMutexProfileFraction(1); n != 0 { + t.Logf("got mutexrate %d expected 0", n) + } + defer runtime.SetMutexProfileFraction(0) + + m := new(Mutex) + + m.Lock() + if m.TryLock() { + t.Fatalf("TryLock succeeded with mutex locked") + } + m.Unlock() + if !m.TryLock() { + t.Fatalf("TryLock failed with mutex unlocked") + } + m.Unlock() + + c := make(chan bool) + for i := 0; i < 10; i++ { + go HammerMutex(m, 1000, c) + } + for i := 0; i < 10; i++ { + <-c + } +} + +func TestMutexFairness(t *testutil.TestRunner) { + var mu Mutex + stop := make(chan bool) + defer close(stop) + go func() { + for { + mu.Lock() + time.Sleep(100 * time.Microsecond) + mu.Unlock() + select { + case <-stop: + return + default: + } + } + }() + done := make(chan bool, 1) + go func() { + for i := 0; i < 10; i++ { + time.Sleep(100 * time.Microsecond) + mu.Lock() + mu.Unlock() + } + done <- true + }() + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatalf("can't acquire Mutex in 10 seconds") + } +} diff --git a/cannon/testdata/example/mt-mutex/runtime.go b/cannon/testdata/example/mt-mutex/runtime.go new file mode 100644 index 00000000000..8d70cb10b35 --- /dev/null +++ b/cannon/testdata/example/mt-mutex/runtime.go @@ -0,0 +1,14 @@ +package main + +import ( + _ "unsafe" // Required for go:linkname +) + +var 
Runtime_Semacquire = runtime_Semacquire +var Runtime_Semrelease = runtime_Semrelease + +//go:linkname runtime_Semacquire sync.runtime_Semacquire +func runtime_Semacquire(s *uint32) + +//go:linkname runtime_Semrelease sync.runtime_Semrelease +func runtime_Semrelease(s *uint32, handoff bool, skipframes int) diff --git a/cannon/testdata/example/mt-once/go.mod b/cannon/testdata/example/mt-once/go.mod new file mode 100644 index 00000000000..7595e1de483 --- /dev/null +++ b/cannon/testdata/example/mt-once/go.mod @@ -0,0 +1,5 @@ +module once + +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/example/mt-once/main.go b/cannon/testdata/example/mt-once/main.go new file mode 100644 index 00000000000..3be753e2f70 --- /dev/null +++ b/cannon/testdata/example/mt-once/main.go @@ -0,0 +1,98 @@ +// Portions of this code are derived from code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/400433af3660905ecaceaf19ddad3e6c24b141df/src/sync/once_test.go +// +// --- Original License Notice --- +// +// Copyright 2009 The Go Authors. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google LLC nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package main + +import ( + "fmt" + "os" + "sync" +) + +func main() { + TestOnce() + TestOncePanic() + + fmt.Println("Once test passed") +} + +type one int + +func (o *one) Increment() { + *o++ +} + +func run(once *sync.Once, o *one, c chan bool) { + once.Do(func() { o.Increment() }) + if v := *o; v != 1 { + _, _ = fmt.Fprintf(os.Stderr, "once failed inside run: %d is not 1\n", v) + os.Exit(1) + } + c <- true +} + +func TestOnce() { + o := new(one) + once := new(sync.Once) + c := make(chan bool) + const N = 10 + for i := 0; i < N; i++ { + go run(once, o, c) + } + for i := 0; i < N; i++ { + <-c + } + if *o != 1 { + _, _ = fmt.Fprintf(os.Stderr, "once failed outside run: %d is not 1\n", *o) + os.Exit(1) + } +} + +func TestOncePanic() { + var once sync.Once + func() { + defer func() { + if r := recover(); r == nil { + _, _ = fmt.Fprintf(os.Stderr, "Once.Do did not panic") + os.Exit(1) + } + }() + once.Do(func() { + panic("failed") + }) + }() + + once.Do(func() { + _, _ = fmt.Fprintf(os.Stderr, "Once.Do called twice") + os.Exit(1) + }) +} diff --git a/cannon/testdata/example/mt-oncefunc/go.mod b/cannon/testdata/example/mt-oncefunc/go.mod new file mode 100644 index 
00000000000..e0f45e8c879 --- /dev/null +++ b/cannon/testdata/example/mt-oncefunc/go.mod @@ -0,0 +1,8 @@ +module oncefunc + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-oncefunc/main.go b/cannon/testdata/example/mt-oncefunc/main.go new file mode 100644 index 00000000000..d5b0badc292 --- /dev/null +++ b/cannon/testdata/example/mt-oncefunc/main.go @@ -0,0 +1,22 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestOnceFunc, "TestOnceFunc") + testutil.RunTest(TestOnceValue, "TestOnceValue") + testutil.RunTest(TestOnceValues, "TestOnceValues") + testutil.RunTest(TestOnceFuncPanic, "TestOnceFuncPanic") + testutil.RunTest(TestOnceValuePanic, "TestOnceValuePanic") + testutil.RunTest(TestOnceValuesPanic, "TestOnceValuesPanic") + testutil.RunTest(TestOnceFuncPanicNil, "TestOnceFuncPanicNil") + testutil.RunTest(TestOnceFuncGoexit, "TestOnceFuncGoexit") + testutil.RunTest(TestOnceFuncPanicTraceback, "TestOnceFuncPanicTraceback") + testutil.RunTest(TestOnceXGC, "TestOnceXGC") + + fmt.Println("OnceFunc tests passed") +} diff --git a/cannon/testdata/example/mt-oncefunc/oncefunc_test_copy.go b/cannon/testdata/example/mt-oncefunc/oncefunc_test_copy.go new file mode 100644 index 00000000000..fdbe93c260a --- /dev/null +++ b/cannon/testdata/example/mt-oncefunc/oncefunc_test_copy.go @@ -0,0 +1,265 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/oncefunc_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "bytes" + "math" + "runtime" + "runtime/debug" + "sync" + "sync/atomic" + "testing" + _ "unsafe" + + "utils/testutil" +) + +// We assume that the Once.Do tests have already covered parallelism. 
+ +func TestOnceFunc(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceFunc(func() { calls++ }) + allocs := testing.AllocsPerRun(10, f) + if calls != 1 { + t.Errorf("want calls==1, got %d", calls) + } + if allocs != 0 { + t.Errorf("want 0 allocations per call, got %v", allocs) + } +} + +func TestOnceValue(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceValue(func() int { + calls++ + return calls + }) + allocs := testing.AllocsPerRun(10, func() { f() }) + value := f() + if calls != 1 { + t.Errorf("want calls==1, got %d", calls) + } + if value != 1 { + t.Errorf("want value==1, got %d", value) + } + if allocs != 0 { + t.Errorf("want 0 allocations per call, got %v", allocs) + } +} + +func TestOnceValues(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceValues(func() (int, int) { + calls++ + return calls, calls + 1 + }) + allocs := testing.AllocsPerRun(10, func() { f() }) + v1, v2 := f() + if calls != 1 { + t.Errorf("want calls==1, got %d", calls) + } + if v1 != 1 || v2 != 2 { + t.Errorf("want v1==1 and v2==2, got %d and %d", v1, v2) + } + if allocs != 0 { + t.Errorf("want 0 allocations per call, got %v", allocs) + } +} + +func testOncePanicX(t testing.TB, calls *int, f func()) { + testOncePanicWith(t, calls, f, func(label string, p any) { + if p != "x" { + t.Fatalf("%s: want panic %v, got %v", label, "x", p) + } + }) +} + +func testOncePanicWith(t testing.TB, calls *int, f func(), check func(label string, p any)) { + // Check that the each call to f panics with the same value, but the + // underlying function is only called once. 
+ for _, label := range []string{"first time", "second time"} { + var p any + panicked := true + func() { + defer func() { + p = recover() + }() + f() + panicked = false + }() + if !panicked { + t.Fatalf("%s: f did not panic", label) + } + check(label, p) + } + if *calls != 1 { + t.Errorf("want calls==1, got %d", *calls) + } +} + +func TestOnceFuncPanic(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceFunc(func() { + calls++ + panic("x") + }) + testOncePanicX(t, &calls, f) +} + +func TestOnceValuePanic(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceValue(func() int { + calls++ + panic("x") + }) + testOncePanicX(t, &calls, func() { f() }) +} + +func TestOnceValuesPanic(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceValues(func() (int, int) { + calls++ + panic("x") + }) + testOncePanicX(t, &calls, func() { f() }) +} + +func TestOnceFuncPanicNil(t *testutil.TestRunner) { + calls := 0 + f := sync.OnceFunc(func() { + calls++ + panic(nil) + }) + testOncePanicWith(t, &calls, f, func(label string, p any) { + switch p.(type) { + case nil, *runtime.PanicNilError: + return + } + t.Fatalf("%s: want nil panic, got %v", label, p) + }) +} + +func TestOnceFuncGoexit(t *testutil.TestRunner) { + // If f calls Goexit, the results are unspecified. But check that f doesn't + // get called twice. + calls := 0 + f := sync.OnceFunc(func() { + calls++ + runtime.Goexit() + }) + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + defer wg.Done() + defer func() { recover() }() + f() + }() + wg.Wait() + } + if calls != 1 { + t.Errorf("want calls==1, got %d", calls) + } +} + +func TestOnceFuncPanicTraceback(t *testutil.TestRunner) { + // Test that on the first invocation of a OnceFunc, the stack trace goes all + // the way to the origin of the panic. 
+ f := sync.OnceFunc(onceFuncPanic) + + defer func() { + if p := recover(); p != "x" { + t.Fatalf("want panic %v, got %v", "x", p) + } + stack := debug.Stack() + //want := "sync_test.onceFuncPanic" + want := "main.onceFuncPanic" + if !bytes.Contains(stack, []byte(want)) { + t.Fatalf("want stack containing %v, got:\n%s", want, string(stack)) + } + }() + f() +} + +func onceFuncPanic() { + panic("x") +} + +func TestOnceXGC(t *testutil.TestRunner) { + fns := map[string]func([]byte) func(){ + "OnceFunc": func(buf []byte) func() { + return sync.OnceFunc(func() { buf[0] = 1 }) + }, + "OnceValue": func(buf []byte) func() { + f := sync.OnceValue(func() any { buf[0] = 1; return nil }) + return func() { f() } + }, + "OnceValues": func(buf []byte) func() { + f := sync.OnceValues(func() (any, any) { buf[0] = 1; return nil, nil }) + return func() { f() } + }, + } + for n, fn := range fns { + t.Run(n, func(t testing.TB) { + buf := make([]byte, 1024) + var gc atomic.Bool + runtime.SetFinalizer(&buf[0], func(_ *byte) { + gc.Store(true) + }) + f := fn(buf) + gcwaitfin() + if gc.Load() != false { + t.Fatal("wrapped function garbage collected too early") + } + f() + gcwaitfin() + if gc.Load() != true { + // Even if f is still alive, the function passed to Once(Func|Value|Values) + // is not kept alive after the first call to f. + t.Fatal("wrapped function should be garbage collected, but still live") + } + f() + }) + } +} + +// gcwaitfin performs garbage collection and waits for all finalizers to run. 
+func gcwaitfin() { + runtime.GC() + runtime_blockUntilEmptyFinalizerQueue(math.MaxInt64) +} + +//go:linkname runtime_blockUntilEmptyFinalizerQueue runtime.blockUntilEmptyFinalizerQueue +func runtime_blockUntilEmptyFinalizerQueue(int64) bool diff --git a/cannon/testdata/example/mt-pool/export_test_copy.go b/cannon/testdata/example/mt-pool/export_test_copy.go new file mode 100644 index 00000000000..37b7c4a92bf --- /dev/null +++ b/cannon/testdata/example/mt-pool/export_test_copy.go @@ -0,0 +1,86 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/export_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +// Export for testing. +// var Runtime_Semacquire = runtime_Semacquire +// var Runtime_Semrelease = runtime_Semrelease +var Runtime_procPin = runtime_procPin +var Runtime_procUnpin = runtime_procUnpin + +// poolDequeue testing. +type PoolDequeue interface { + PushHead(val any) bool + PopHead() (any, bool) + PopTail() (any, bool) +} + +func NewPoolDequeue(n int) PoolDequeue { + d := &poolDequeue{ + vals: make([]eface, n), + } + // For testing purposes, set the head and tail indexes close + // to wrapping around. 
+ d.headTail.Store(d.pack(1< ../../utils diff --git a/cannon/testdata/example/mt-pool/main.go b/cannon/testdata/example/mt-pool/main.go new file mode 100644 index 00000000000..2c138b07cd9 --- /dev/null +++ b/cannon/testdata/example/mt-pool/main.go @@ -0,0 +1,20 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestPool, "TestPool") + testutil.RunTest(TestPoolNew, "TestPoolNew") + testutil.RunTest(TestPoolGC, "TestPoolGC") + testutil.RunTest(TestPoolRelease, "TestPoolRelease") + testutil.RunTest(TestPoolStress, "TestPoolStress") + testutil.RunTest(TestPoolDequeue, "TestPoolDequeue") + testutil.RunTest(TestPoolChain, "TestPoolChain") + testutil.RunTest(TestNilPool, "TestNilPool") + + fmt.Println("Pool test passed") +} diff --git a/cannon/testdata/example/mt-pool/pool_test_copy.go b/cannon/testdata/example/mt-pool/pool_test_copy.go new file mode 100644 index 00000000000..962cbfce7b5 --- /dev/null +++ b/cannon/testdata/example/mt-pool/pool_test_copy.go @@ -0,0 +1,298 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/pool_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "runtime" + "runtime/debug" + . "sync" + "sync/atomic" + "testing" + "time" + + "utils/testutil" +) + +var short bool = true + +func TestPool(t *testutil.TestRunner) { + // disable GC so we can control when it happens. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + var p Pool + if p.Get() != nil { + t.Fatal("expected empty") + } + + // Make sure that the goroutine doesn't migrate to another P + // between Put and Get calls. + Runtime_procPin() + p.Put("a") + p.Put("b") + if g := p.Get(); g != "a" { + t.Fatalf("got %#v; want a", g) + } + if g := p.Get(); g != "b" { + t.Fatalf("got %#v; want b", g) + } + if g := p.Get(); g != nil { + t.Fatalf("got %#v; want nil", g) + } + Runtime_procUnpin() + + // Put in a large number of objects so they spill into + // stealable space. + for i := 0; i < 100; i++ { + p.Put("c") + } + // After one GC, the victim cache should keep them alive. + runtime.GC() + if g := p.Get(); g != "c" { + t.Fatalf("got %#v; want c after GC", g) + } + // A second GC should drop the victim cache. 
+ runtime.GC() + if g := p.Get(); g != nil { + t.Fatalf("got %#v; want nil after second GC", g) + } +} + +func TestPoolNew(t *testutil.TestRunner) { + // disable GC so we can control when it happens. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + i := 0 + p := Pool{ + New: func() any { + i++ + return i + }, + } + if v := p.Get(); v != 1 { + t.Fatalf("got %v; want 1", v) + } + if v := p.Get(); v != 2 { + t.Fatalf("got %v; want 2", v) + } + + // Make sure that the goroutine doesn't migrate to another P + // between Put and Get calls. + Runtime_procPin() + p.Put(42) + if v := p.Get(); v != 42 { + t.Fatalf("got %v; want 42", v) + } + Runtime_procUnpin() + + if v := p.Get(); v != 3 { + t.Fatalf("got %v; want 3", v) + } +} + +// Test that Pool does not hold pointers to previously cached resources. +func TestPoolGC(t *testutil.TestRunner) { + testPool(t, true) +} + +// Test that Pool releases resources on GC. +func TestPoolRelease(t *testutil.TestRunner) { + testPool(t, false) +} + +func testPool(t testing.TB, drain bool) { + var p Pool + const N = 100 +loop: + for try := 0; try < 3; try++ { + if try == 1 && short { + break + } + var fin, fin1 uint32 + for i := 0; i < N; i++ { + v := new(string) + runtime.SetFinalizer(v, func(vv *string) { + atomic.AddUint32(&fin, 1) + }) + p.Put(v) + } + if drain { + for i := 0; i < N; i++ { + p.Get() + } + } + for i := 0; i < 5; i++ { + runtime.GC() + time.Sleep(time.Duration(i*100+10) * time.Millisecond) + // 1 pointer can remain on stack or elsewhere + if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 { + continue loop + } + } + t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try) + } +} + +func TestPoolStress(t *testutil.TestRunner) { + const P = 10 + N := int(1e6) + if short { + N /= 100 + } + var p Pool + done := make(chan bool) + for i := 0; i < P; i++ { + go func() { + var v any = 0 + for j := 0; j < N; j++ { + if v == nil { + v = 0 + } + p.Put(v) + v = p.Get() + if v != nil && v.(int) != 0 { + 
t.Errorf("expect 0, got %v", v) + break + } + } + done <- true + }() + } + for i := 0; i < P; i++ { + <-done + } +} + +func TestPoolDequeue(t *testutil.TestRunner) { + testPoolDequeue(t, NewPoolDequeue(16)) +} + +func TestPoolChain(t *testutil.TestRunner) { + testPoolDequeue(t, NewPoolChain()) +} + +func testPoolDequeue(t testing.TB, d PoolDequeue) { + const P = 10 + var N int = 2e6 + if short { + N = 1e3 + } + have := make([]int32, N) + var stop int32 + var wg WaitGroup + record := func(val int) { + atomic.AddInt32(&have[val], 1) + if val == N-1 { + atomic.StoreInt32(&stop, 1) + } + } + + // Start P-1 consumers. + for i := 1; i < P; i++ { + wg.Add(1) + go func() { + fail := 0 + for atomic.LoadInt32(&stop) == 0 { + val, ok := d.PopTail() + if ok { + fail = 0 + record(val.(int)) + } else { + // Speed up the test by + // allowing the pusher to run. + if fail++; fail%100 == 0 { + runtime.Gosched() + } + } + } + wg.Done() + }() + } + + // Start 1 producer. + nPopHead := 0 + wg.Add(1) + go func() { + for j := 0; j < N; j++ { + for !d.PushHead(j) { + // Allow a popper to run. + runtime.Gosched() + } + if j%10 == 0 { + val, ok := d.PopHead() + if ok { + nPopHead++ + record(val.(int)) + } + } + } + wg.Done() + }() + wg.Wait() + + // Check results. + for i, count := range have { + if count != 1 { + t.Errorf("expected have[%d] = 1, got %d", i, count) + } + } + // Check that at least some PopHeads succeeded. We skip this + // check in short mode because it's common enough that the + // queue will stay nearly empty all the time and a PopTail + // will happen during the window between every PushHead and + // PopHead. 
+ if !short && nPopHead == 0 { + t.Errorf("popHead never succeeded") + } +} + +func TestNilPool(t *testutil.TestRunner) { + catch := func() { + if recover() == nil { + t.Error("expected panic") + } + } + + var p *Pool + t.Run("Get", func(t testing.TB) { + defer catch() + if p.Get() != nil { + t.Error("expected empty") + } + t.Error("should have panicked already") + }) + t.Run("Put", func(t testing.TB) { + defer catch() + p.Put("a") + t.Error("should have panicked already") + }) +} diff --git a/cannon/testdata/example/mt-pool/poolqueue_copy.go b/cannon/testdata/example/mt-pool/poolqueue_copy.go new file mode 100644 index 00000000000..5aa7b1ffa7b --- /dev/null +++ b/cannon/testdata/example/mt-pool/poolqueue_copy.go @@ -0,0 +1,338 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/poolqueue.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "sync/atomic" + "unsafe" +) + +// poolDequeue is a lock-free fixed-size single-producer, +// multi-consumer queue. The single producer can both push and pop +// from the head, and consumers can pop from the tail. +// +// It has the added feature that it nils out unused slots to avoid +// unnecessary retention of objects. This is important for sync.Pool, +// but not typically a property considered in the literature. +type poolDequeue struct { + // headTail packs together a 32-bit head index and a 32-bit + // tail index. Both are indexes into vals modulo len(vals)-1. + // + // tail = index of oldest data in queue + // head = index of next slot to fill + // + // Slots in the range [tail, head) are owned by consumers. + // A consumer continues to own a slot outside this range until + // it nils the slot, at which point ownership passes to the + // producer. + // + // The head index is stored in the most-significant bits so + // that we can atomically add to it and the overflow is + // harmless. + headTail atomic.Uint64 + + // vals is a ring buffer of interface{} values stored in this + // dequeue. The size of this must be a power of 2. + // + // vals[i].typ is nil if the slot is empty and non-nil + // otherwise. A slot is still in use until *both* the tail + // index has moved beyond it and typ has been set to nil. This + // is set to nil atomically by the consumer and read + // atomically by the producer. 
+ vals []eface +} + +type eface struct { + typ, val unsafe.Pointer +} + +const dequeueBits = 32 + +// dequeueLimit is the maximum size of a poolDequeue. +// +// This must be at most (1<> dequeueBits) & mask) + tail = uint32(ptrs & mask) + return +} + +func (d *poolDequeue) pack(head, tail uint32) uint64 { + const mask = 1<= dequeueLimit { + // Can't make it any bigger. + newSize = dequeueLimit + } + + d2 := &poolChainElt{prev: d} + d2.vals = make([]eface, newSize) + c.head = d2 + storePoolChainElt(&d.next, d2) + d2.pushHead(val) +} + +func (c *poolChain) popHead() (any, bool) { + d := c.head + for d != nil { + if val, ok := d.popHead(); ok { + return val, ok + } + // There may still be unconsumed elements in the + // previous dequeue, so try backing up. + d = loadPoolChainElt(&d.prev) + } + return nil, false +} + +func (c *poolChain) popTail() (any, bool) { + d := loadPoolChainElt(&c.tail) + if d == nil { + return nil, false + } + + for { + // It's important that we load the next pointer + // *before* popping the tail. In general, d may be + // transiently empty, but if next is non-nil before + // the pop and the pop fails, then d is permanently + // empty, which is the only condition under which it's + // safe to drop d from the chain. + d2 := loadPoolChainElt(&d.next) + + if val, ok := d.popTail(); ok { + return val, ok + } + + if d2 == nil { + // This is the only dequeue. It's empty right + // now, but could be pushed to in the future. + return nil, false + } + + // The tail of the chain has been drained, so move on + // to the next dequeue. Try to drop it from the chain + // so the next pop doesn't have to look at the empty + // dequeue again. + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) { + // We won the race. Clear the prev pointer so + // the garbage collector can collect the empty + // dequeue and so popHead doesn't back up + // further than necessary. 
+ storePoolChainElt(&d2.prev, nil) + } + d = d2 + } +} diff --git a/cannon/testdata/example/mt-pool/runtime.go b/cannon/testdata/example/mt-pool/runtime.go new file mode 100644 index 00000000000..1b6dbe3e6cd --- /dev/null +++ b/cannon/testdata/example/mt-pool/runtime.go @@ -0,0 +1,11 @@ +package main + +import ( + _ "unsafe" // Required for go:linkname +) + +//go:linkname runtime_procPin runtime.procPin +func runtime_procPin() int + +//go:linkname runtime_procUnpin runtime.procUnpin +func runtime_procUnpin() diff --git a/cannon/testdata/example/mt-rwmutex/go.mod b/cannon/testdata/example/mt-rwmutex/go.mod new file mode 100644 index 00000000000..a0a433e9119 --- /dev/null +++ b/cannon/testdata/example/mt-rwmutex/go.mod @@ -0,0 +1,5 @@ +module rwmutex + +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/example/mt-rwmutex/main.go b/cannon/testdata/example/mt-rwmutex/main.go new file mode 100644 index 00000000000..8553bba75ef --- /dev/null +++ b/cannon/testdata/example/mt-rwmutex/main.go @@ -0,0 +1,226 @@ +// Portions of this code are derived from code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/400433af3660905ecaceaf19ddad3e6c24b141df/src/sync/rwmutex_test.go +// +// --- Original License Notice --- +// +// Copyright 2009 The Go Authors. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google LLC nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package main + +import ( + "fmt" + "os" + "runtime" + "sync" + "sync/atomic" +) + +func main() { + TestParallelReaders() + TestRLocker() + TestRWMutex() + + fmt.Println("RWMutex test passed") +} + +func parallelReader(m *sync.RWMutex, clocked, cunlock, cdone chan bool) { + m.RLock() + clocked <- true + <-cunlock + m.RUnlock() + cdone <- true +} + +func doTestParallelReaders(numReaders, gomaxprocs int) { + runtime.GOMAXPROCS(gomaxprocs) + var m sync.RWMutex + clocked := make(chan bool) + cunlock := make(chan bool) + cdone := make(chan bool) + for i := 0; i < numReaders; i++ { + go parallelReader(&m, clocked, cunlock, cdone) + } + // Wait for all parallel RLock()s to succeed. + for i := 0; i < numReaders; i++ { + <-clocked + } + for i := 0; i < numReaders; i++ { + cunlock <- true + } + // Wait for the goroutines to finish. 
+ for i := 0; i < numReaders; i++ { + <-cdone + } +} + +func TestParallelReaders() { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) + doTestParallelReaders(1, 4) + doTestParallelReaders(3, 4) + doTestParallelReaders(4, 2) +} + +func reader(rwm *sync.RWMutex, num_iterations int, activity *int32, cdone chan bool) { + for i := 0; i < num_iterations; i++ { + rwm.RLock() + n := atomic.AddInt32(activity, 1) + if n < 1 || n >= 10000 { + rwm.RUnlock() + panic(fmt.Sprintf("wlock(%d)\n", n)) + } + for i := 0; i < 100; i++ { + } + atomic.AddInt32(activity, -1) + rwm.RUnlock() + } + cdone <- true +} + +func writer(rwm *sync.RWMutex, num_iterations int, activity *int32, cdone chan bool) { + for i := 0; i < num_iterations; i++ { + rwm.Lock() + n := atomic.AddInt32(activity, 10000) + if n != 10000 { + rwm.Unlock() + panic(fmt.Sprintf("wlock(%d)\n", n)) + } + for i := 0; i < 100; i++ { + } + atomic.AddInt32(activity, -10000) + rwm.Unlock() + } + cdone <- true +} + +func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) { + runtime.GOMAXPROCS(gomaxprocs) + // Number of active readers + 10000 * number of active writers. + var activity int32 + var rwm sync.RWMutex + cdone := make(chan bool) + go writer(&rwm, num_iterations, &activity, cdone) + var i int + for i = 0; i < numReaders/2; i++ { + go reader(&rwm, num_iterations, &activity, cdone) + } + go writer(&rwm, num_iterations, &activity, cdone) + for ; i < numReaders; i++ { + go reader(&rwm, num_iterations, &activity, cdone) + } + // Wait for the 2 writers and all readers to finish. 
+ for i := 0; i < 2+numReaders; i++ { + <-cdone + } +} + +func TestRWMutex() { + var m sync.RWMutex + + m.Lock() + if m.TryLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryLock succeeded with mutex locked") + os.Exit(1) + } + if m.TryRLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryRLock succeeded with mutex locked") + os.Exit(1) + } + m.Unlock() + + if !m.TryLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryLock failed with mutex unlocked") + os.Exit(1) + } + m.Unlock() + + if !m.TryRLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryRLock failed with mutex unlocked") + os.Exit(1) + } + if !m.TryRLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryRLock failed with mutex unlocked") + os.Exit(1) + } + if m.TryLock() { + _, _ = fmt.Fprintln(os.Stderr, "TryLock succeeded with mutex rlocked") + os.Exit(1) + } + m.RUnlock() + m.RUnlock() + + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) + n := 5 + + HammerRWMutex(1, 1, n) + HammerRWMutex(1, 3, n) + HammerRWMutex(1, 10, n) + HammerRWMutex(4, 1, n) + HammerRWMutex(4, 3, n) + HammerRWMutex(4, 10, n) + HammerRWMutex(10, 1, n) + HammerRWMutex(10, 3, n) + HammerRWMutex(10, 10, n) + HammerRWMutex(10, 5, n) +} + +func TestRLocker() { + var wl sync.RWMutex + var rl sync.Locker + wlocked := make(chan bool, 1) + rlocked := make(chan bool, 1) + rl = wl.RLocker() + n := 10 + go func() { + for i := 0; i < n; i++ { + rl.Lock() + rl.Lock() + rlocked <- true + wl.Lock() + wlocked <- true + } + }() + for i := 0; i < n; i++ { + <-rlocked + rl.Unlock() + select { + case <-wlocked: + _, _ = fmt.Fprintln(os.Stderr, "RLocker() didn't read-lock it") + os.Exit(1) + default: + } + rl.Unlock() + <-wlocked + select { + case <-rlocked: + _, _ = fmt.Fprintln(os.Stderr, "RLocker() didn't respect the write lock") + os.Exit(1) + default: + } + wl.Unlock() + } +} diff --git a/cannon/testdata/example/mt-value/go.mod b/cannon/testdata/example/mt-value/go.mod new file mode 100644 index 00000000000..602687cbcca --- /dev/null +++ b/cannon/testdata/example/mt-value/go.mod @@ 
-0,0 +1,8 @@ +module mtvalue + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-value/main.go b/cannon/testdata/example/mt-value/main.go new file mode 100644 index 00000000000..51fd1b2b730 --- /dev/null +++ b/cannon/testdata/example/mt-value/main.go @@ -0,0 +1,20 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestValue, "TestValue") + testutil.RunTest(TestValueLarge, "TestValueLarge") + testutil.RunTest(TestValuePanic, "TestValuePanic") + testutil.RunTest(TestValueConcurrent, "TestValueConcurrent") + testutil.RunTest(TestValue_Swap, "TestValue_Swap") + testutil.RunTest(TestValueSwapConcurrent, "TestValueSwapConcurrent") + testutil.RunTest(TestValue_CompareAndSwap, "TestValue_CompareAndSwap") + testutil.RunTest(TestValueCompareAndSwapConcurrent, "TestValueCompareAndSwapConcurrent") + + fmt.Println("Value tests passed") +} diff --git a/cannon/testdata/example/mt-value/value_test_copy.go b/cannon/testdata/example/mt-value/value_test_copy.go new file mode 100644 index 00000000000..32ccc006740 --- /dev/null +++ b/cannon/testdata/example/mt-value/value_test_copy.go @@ -0,0 +1,312 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/atomic/value_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + "math/rand" + "runtime" + "strconv" + "sync" + "sync/atomic" + . 
"sync/atomic" + "testing" + + "utils/testutil" +) + +var short bool = true + +func TestValue(t *testutil.TestRunner) { + var v Value + if v.Load() != nil { + t.Fatal("initial Value is not nil") + } + v.Store(42) + x := v.Load() + if xx, ok := x.(int); !ok || xx != 42 { + t.Fatalf("wrong value: got %+v, want 42", x) + } + v.Store(84) + x = v.Load() + if xx, ok := x.(int); !ok || xx != 84 { + t.Fatalf("wrong value: got %+v, want 84", x) + } +} + +func TestValueLarge(t *testutil.TestRunner) { + var v Value + v.Store("foo") + x := v.Load() + if xx, ok := x.(string); !ok || xx != "foo" { + t.Fatalf("wrong value: got %+v, want foo", x) + } + v.Store("barbaz") + x = v.Load() + if xx, ok := x.(string); !ok || xx != "barbaz" { + t.Fatalf("wrong value: got %+v, want barbaz", x) + } +} + +func TestValuePanic(t *testutil.TestRunner) { + const nilErr = "sync/atomic: store of nil value into Value" + const badErr = "sync/atomic: store of inconsistently typed value into Value" + var v Value + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() + v.Store(42) + func() { + defer func() { + err := recover() + if err != badErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, badErr) + } + }() + v.Store("foo") + }() + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() +} + +func TestValueConcurrent(t *testutil.TestRunner) { + tests := [][]any{ + {uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)}, + {uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)}, + {uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)}, + {complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)}, + } + p := 4 * runtime.GOMAXPROCS(0) + N := int(1e5) + if short { + p /= 2 + //N = 1e3 + N = 1e2 + } + for _, test := range tests { + var v 
Value + done := make(chan bool, p) + for i := 0; i < p; i++ { + go func() { + r := rand.New(rand.NewSource(rand.Int63())) + expected := true + loop: + for j := 0; j < N; j++ { + x := test[r.Intn(len(test))] + v.Store(x) + x = v.Load() + for _, x1 := range test { + if x == x1 { + continue loop + } + } + t.Logf("loaded unexpected value %+v, want %+v", x, test) + expected = false + break + } + done <- expected + }() + } + for i := 0; i < p; i++ { + if !<-done { + t.FailNow() + } + } + } +} + +func BenchmarkValueRead(b *testing.B) { + var v Value + v.Store(new(int)) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + x := v.Load().(*int) + if *x != 0 { + b.Fatalf("wrong value: got %v, want 0", *x) + } + } + }) +} + +var Value_SwapTests = []struct { + init any + new any + want any + err any +}{ + {init: nil, new: nil, err: "sync/atomic: swap of nil value into Value"}, + {init: nil, new: true, want: nil, err: nil}, + {init: true, new: "", err: "sync/atomic: swap of inconsistently typed value into Value"}, + {init: true, new: false, want: true, err: nil}, +} + +func TestValue_Swap(t *testutil.TestRunner) { + for i, tt := range Value_SwapTests { + t.Run(strconv.Itoa(i), func(t testing.TB) { + var v Value + if tt.init != nil { + v.Store(tt.init) + } + defer func() { + err := recover() + switch { + case tt.err == nil && err != nil: + t.Errorf("should not panic, got %v", err) + case tt.err != nil && err == nil: + t.Errorf("should panic %v, got ", tt.err) + } + }() + if got := v.Swap(tt.new); got != tt.want { + t.Errorf("got %v, want %v", got, tt.want) + } + if got := v.Load(); got != tt.new { + t.Errorf("got %v, want %v", got, tt.new) + } + }) + } +} + +func TestValueSwapConcurrent(t *testutil.TestRunner) { + var v Value + var count uint64 + var g sync.WaitGroup + var m, n uint64 = 10000, 10000 + if short { + //m = 1000 + //n = 1000 + m = 10 + n = 10 + } + for i := uint64(0); i < m*n; i += n { + i := i + g.Add(1) + go func() { + var c uint64 + for new := i; new < i+n; 
new++ { + if old := v.Swap(new); old != nil { + c += old.(uint64) + } + } + atomic.AddUint64(&count, c) + g.Done() + }() + } + g.Wait() + if want, got := (m*n-1)*(m*n)/2, count+v.Load().(uint64); got != want { + t.Errorf("sum from 0 to %d was %d, want %v", m*n-1, got, want) + } +} + +var heapA, heapB = struct{ uint }{0}, struct{ uint }{0} + +var Value_CompareAndSwapTests = []struct { + init any + new any + old any + want bool + err any +}{ + {init: nil, new: nil, old: nil, err: "sync/atomic: compare and swap of nil value into Value"}, + {init: nil, new: true, old: "", err: "sync/atomic: compare and swap of inconsistently typed values into Value"}, + {init: nil, new: true, old: true, want: false, err: nil}, + {init: nil, new: true, old: nil, want: true, err: nil}, + {init: true, new: "", err: "sync/atomic: compare and swap of inconsistently typed value into Value"}, + {init: true, new: true, old: false, want: false, err: nil}, + {init: true, new: true, old: true, want: true, err: nil}, + {init: heapA, new: struct{ uint }{1}, old: heapB, want: true, err: nil}, +} + +func TestValue_CompareAndSwap(t *testutil.TestRunner) { + for i, tt := range Value_CompareAndSwapTests { + t.Run(strconv.Itoa(i), func(t testing.TB) { + var v Value + if tt.init != nil { + v.Store(tt.init) + } + defer func() { + err := recover() + switch { + case tt.err == nil && err != nil: + t.Errorf("got %v, wanted no panic", err) + case tt.err != nil && err == nil: + t.Errorf("did not panic, want %v", tt.err) + } + }() + if got := v.CompareAndSwap(tt.old, tt.new); got != tt.want { + t.Errorf("got %v, want %v", got, tt.want) + } + }) + } +} + +func TestValueCompareAndSwapConcurrent(t *testutil.TestRunner) { + var v Value + var w sync.WaitGroup + v.Store(0) + m, n := 1000, 100 + if short { + //m = 100 + //n = 100 + m = 10 + n = 10 + } + for i := 0; i < m; i++ { + i := i + w.Add(1) + go func() { + for j := i; j < m*n; runtime.Gosched() { + if v.CompareAndSwap(j, j+1) { + j += m + } + } + w.Done() + }() + 
} + w.Wait() + if stop := v.Load().(int); stop != m*n { + t.Errorf("did not get to %v, stopped at %v", m*n, stop) + } +} diff --git a/cannon/testdata/example/mt-wg/go.mod b/cannon/testdata/example/mt-wg/go.mod new file mode 100644 index 00000000000..0c10638b3d1 --- /dev/null +++ b/cannon/testdata/example/mt-wg/go.mod @@ -0,0 +1,8 @@ +module wg + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/mt-wg/main.go b/cannon/testdata/example/mt-wg/main.go new file mode 100644 index 00000000000..cfdb3e56b8e --- /dev/null +++ b/cannon/testdata/example/mt-wg/main.go @@ -0,0 +1,16 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(TestWaitGroup, "TestWaitGroup") + testutil.RunTest(TestWaitGroupMisuse, "TestWaitGroupMisuse") + testutil.RunTest(TestWaitGroupRace, "TestWaitGroupRace") + testutil.RunTest(TestWaitGroupAlign, "TestWaitGroupAlign") + + fmt.Println("WaitGroup tests passed") +} diff --git a/cannon/testdata/example/mt-wg/waitgroup_test_copy.go b/cannon/testdata/example/mt-wg/waitgroup_test_copy.go new file mode 100644 index 00000000000..81f0cd0ce4c --- /dev/null +++ b/cannon/testdata/example/mt-wg/waitgroup_test_copy.go @@ -0,0 +1,130 @@ +// This file is based on code written by The Go Authors. +// See original source: https://github.com/golang/go/blob/go1.22.7/src/sync/waitgroup_test.go +// +// --- Original License Notice --- +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package main + +import ( + . "sync" + "sync/atomic" + "testing" + + "utils/testutil" +) + +func testWaitGroup(t testing.TB, wg1 *WaitGroup, wg2 *WaitGroup) { + n := 16 + wg1.Add(n) + wg2.Add(n) + exited := make(chan bool, n) + for i := 0; i != n; i++ { + go func() { + wg1.Done() + wg2.Wait() + exited <- true + }() + } + wg1.Wait() + for i := 0; i != n; i++ { + select { + case <-exited: + t.Fatal("WaitGroup released group too soon") + default: + } + wg2.Done() + } + for i := 0; i != n; i++ { + <-exited // Will block if barrier fails to unlock someone. + } +} + +func TestWaitGroup(t *testutil.TestRunner) { + wg1 := &WaitGroup{} + wg2 := &WaitGroup{} + + // Run the same test a few times to ensure barrier is in a proper state. 
+ for i := 0; i != 8; i++ { + testWaitGroup(t, wg1, wg2) + } +} + +func TestWaitGroupMisuse(t *testutil.TestRunner) { + defer func() { + err := recover() + if err != "sync: negative WaitGroup counter" { + t.Fatalf("Unexpected panic: %#v", err) + } + }() + wg := &WaitGroup{} + wg.Add(1) + wg.Done() + wg.Done() + t.Fatal("Should panic") +} + +func TestWaitGroupRace(t *testutil.TestRunner) { + // Run this test for about 1ms. + for i := 0; i < 1000; i++ { + wg := &WaitGroup{} + n := new(int32) + // spawn goroutine 1 + wg.Add(1) + go func() { + atomic.AddInt32(n, 1) + wg.Done() + }() + // spawn goroutine 2 + wg.Add(1) + go func() { + atomic.AddInt32(n, 1) + wg.Done() + }() + // Wait for goroutine 1 and 2 + wg.Wait() + if atomic.LoadInt32(n) != 2 { + t.Fatal("Spurious wakeup from Wait") + } + } +} + +func TestWaitGroupAlign(t *testutil.TestRunner) { + type X struct { + x byte + wg WaitGroup + } + var x X + x.wg.Add(1) + go func(x *X) { + x.wg.Done() + }(&x) + x.wg.Wait() +} diff --git a/cannon/testdata/example/utilscheck/go.mod b/cannon/testdata/example/utilscheck/go.mod new file mode 100644 index 00000000000..5a109a7ca8c --- /dev/null +++ b/cannon/testdata/example/utilscheck/go.mod @@ -0,0 +1,8 @@ +module utilscheck + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/utilscheck/main.go b/cannon/testdata/example/utilscheck/main.go new file mode 100644 index 00000000000..ad66a3211ad --- /dev/null +++ b/cannon/testdata/example/utilscheck/main.go @@ -0,0 +1,17 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(ShouldFail, "ShouldFail") + + fmt.Println("Passed test that should have failed") +} + +func ShouldFail(t *testutil.TestRunner) { + t.Fail() +} diff --git a/cannon/testdata/example/utilscheck2/go.mod b/cannon/testdata/example/utilscheck2/go.mod new file mode 100644 index 00000000000..ee0430168ad --- /dev/null +++ 
b/cannon/testdata/example/utilscheck2/go.mod @@ -0,0 +1,8 @@ +module utilscheck2 + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/utilscheck2/main.go b/cannon/testdata/example/utilscheck2/main.go new file mode 100644 index 00000000000..6fc619f18fe --- /dev/null +++ b/cannon/testdata/example/utilscheck2/main.go @@ -0,0 +1,24 @@ +package main + +import ( + "fmt" + "testing" + + "utils/testutil" +) + +func main() { + testutil.RunTest(ShouldFail, "ShouldFail") + + fmt.Println("Passed test that should have failed") +} + +func ShouldFail(t *testutil.TestRunner) { + t.Run("subtest 1", func(t testing.TB) { + // Do something + }) + + t.Run("subtest 2", func(t testing.TB) { + t.Fail() + }) +} diff --git a/cannon/testdata/example/utilscheck3/go.mod b/cannon/testdata/example/utilscheck3/go.mod new file mode 100644 index 00000000000..3bc116499be --- /dev/null +++ b/cannon/testdata/example/utilscheck3/go.mod @@ -0,0 +1,8 @@ +module utilscheck3 + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git a/cannon/testdata/example/utilscheck3/main.go b/cannon/testdata/example/utilscheck3/main.go new file mode 100644 index 00000000000..248c891808b --- /dev/null +++ b/cannon/testdata/example/utilscheck3/main.go @@ -0,0 +1,20 @@ +package main + +import ( + "fmt" + "testing" + + "utils/testutil" +) + +func main() { + testutil.RunTest(ShouldFail, "ShouldFail") + + fmt.Println("Passed test that should have failed") +} + +func ShouldFail(t *testutil.TestRunner) { + t.Run("panic test", func(t testing.TB) { + panic("oops") + }) +} diff --git a/cannon/testdata/example/utilscheck4/go.mod b/cannon/testdata/example/utilscheck4/go.mod new file mode 100644 index 00000000000..7f80460beb9 --- /dev/null +++ b/cannon/testdata/example/utilscheck4/go.mod @@ -0,0 +1,8 @@ +module utilscheck4 + +go 1.22 + +toolchain go1.22.0 + +require utils v0.0.0 +replace utils => ../../utils diff --git 
a/cannon/testdata/example/utilscheck4/main.go b/cannon/testdata/example/utilscheck4/main.go new file mode 100644 index 00000000000..deb78e2cb4d --- /dev/null +++ b/cannon/testdata/example/utilscheck4/main.go @@ -0,0 +1,17 @@ +package main + +import ( + "fmt" + + "utils/testutil" +) + +func main() { + testutil.RunTest(ShouldFail, "ShouldFail") + + fmt.Println("Passed test that should have failed") +} + +func ShouldFail(t *testutil.TestRunner) { + panic("oops") +} diff --git a/cannon/testdata/utils/go.mod b/cannon/testdata/utils/go.mod new file mode 100644 index 00000000000..45f262e0b16 --- /dev/null +++ b/cannon/testdata/utils/go.mod @@ -0,0 +1,5 @@ +module utils + +go 1.22 + +toolchain go1.22.0 diff --git a/cannon/testdata/utils/testutil/testing.go b/cannon/testdata/utils/testutil/testing.go new file mode 100644 index 00000000000..f6c79c63655 --- /dev/null +++ b/cannon/testdata/utils/testutil/testing.go @@ -0,0 +1,170 @@ +package testutil + +import ( + "fmt" + "os" + "runtime" + "sync" + "testing" +) + +func RunTest(testFunc func(*TestRunner), name string) { + goRunTest(name, testFunc, newTestRunner(name)) +} + +type TestRunner struct { + *mockT + baseName string +} + +func newTestRunner(baseName string) *TestRunner { + return &TestRunner{mockT: newMockT(), baseName: baseName} +} + +func (r *TestRunner) Run(name string, testFunc func(t testing.TB)) bool { + testName := r.baseName + if name != "" { + testName = fmt.Sprintf("%v (%v)", r.baseName, name) + } + + var tester testing.TB = r + goRunTest(testName, testFunc, tester) + return !r.Failed() +} + +func goRunTest[T testing.TB](testName string, testFunc func(t T), t T) { + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer func() { + if err := recover(); err != nil { + fmt.Printf("Test panicked: %v\n\t%v", testName, err) + os.Exit(1) + } + + if t.Failed() { + fmt.Printf("Test failed: %v\n", testName) + os.Exit(1) + } else if t.Skipped() { + fmt.Printf("Test skipped: %v\n", testName) + } else { + 
fmt.Printf("Test passed: %v\n", testName) + } + + wg.Done() + }() + + testFunc(t) + }() + + wg.Wait() +} + +type mockT struct { + *testing.T + mu sync.Mutex + failed bool + skipped bool +} + +var _ testing.TB = (*mockT)(nil) + +func newMockT() *mockT { + return &mockT{} +} + +func (t *mockT) Cleanup(func()) { + t.Fatalf("Cleanup not supported") +} + +func (t *mockT) Error(args ...any) { + fmt.Print(args...) + t.fail() +} + +func (t *mockT) Errorf(format string, args ...any) { + fmt.Printf(format, args...) + t.fail() +} + +func (t *mockT) Fail() { + t.fail() +} + +func (t *mockT) FailNow() { + fmt.Println("Fatal") + t.fail() +} + +func (t *mockT) Failed() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.failed +} + +func (t *mockT) Fatal(args ...any) { + fmt.Print(args...) + t.fail() +} + +func (t *mockT) Fatalf(format string, args ...any) { + fmt.Printf(format, args...) + t.fail() +} + +func (t *mockT) Helper() {} + +func (t *mockT) Log(args ...any) { + fmt.Print(args...) +} + +func (t *mockT) Logf(format string, args ...any) { + fmt.Printf(format, args...) +} + +func (t *mockT) Name() string { + return "" +} + +func (t *mockT) Setenv(key, value string) { + t.Fatalf("Setenv not supported") +} + +func (t *mockT) Skip(args ...any) { + fmt.Println(args...) + t.skip() +} + +func (t *mockT) SkipNow() { + t.skip() +} + +func (t *mockT) Skipf(format string, args ...any) { + fmt.Printf(format, args...) 
+ t.skip() +} +func (t *mockT) Skipped() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.skipped +} + +func (t *mockT) skip() { + t.mu.Lock() + defer t.mu.Unlock() + t.skipped = true + runtime.Goexit() +} + +func (t *mockT) fail() { + t.mu.Lock() + defer t.mu.Unlock() + t.failed = true + runtime.Goexit() +} + +func (t *mockT) TempDir() string { + t.Fatalf("TempDir not supported") + return "" +} diff --git a/docker-bake.hcl b/docker-bake.hcl index b53c7add0f8..53fd8e865be 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -207,7 +207,7 @@ target "proofs-tools" { context = "." args = { CHALLENGER_VERSION="b46bffed42db3442d7484f089278d59f51503049" - KONA_VERSION="kona-client-v0.1.0-alpha.7" + KONA_VERSION="kona-client-v0.1.0-beta.1" } target="proofs-tools" platforms = split(",", PLATFORMS) diff --git a/docs/handbook/pr-guidelines.md b/docs/handbook/pr-guidelines.md index cf133ada6c5..57913ebeaa5 100644 --- a/docs/handbook/pr-guidelines.md +++ b/docs/handbook/pr-guidelines.md @@ -46,4 +46,4 @@ This is organized by current state of PR, so it can be easily referenced frequen ### Merging PRs - **Resolve all Comments**: Comments can be resolved by (1) the PR author for nits/optionals, (2) the author or reviewer after discussions, or (3) extracting the comment into an issue to address in a future PR. For (3), ensure the new issue links to the specific comment thread. This is currently enforced by GitHub's merge requirements. -- **Other Standard Merge Requirements**: The PR must be approved by the appropriate reviewers, CI must passing, and other standard merge requirements apply. +- **Other Standard Merge Requirements**: The PR must be approved by the appropriate reviewers, CI must pass, and other standard merge requirements apply. 
diff --git a/docs/postmortems/2022-02-02-inflation-vuln.md b/docs/postmortems/2022-02-02-inflation-vuln.md index a755b0fdfe3..a2a23e38230 100644 --- a/docs/postmortems/2022-02-02-inflation-vuln.md +++ b/docs/postmortems/2022-02-02-inflation-vuln.md @@ -58,7 +58,7 @@ timeline and activities were as follows: (Using github handles as identifiers) - 2022-02-02 1625: smartcontracts receives an e-mail from saurik claiming to have found a critical - issue in L2Geth. E-mail was sent to securityoptimism.io. + issue in L2Geth. E-mail was sent to security@optimism.io. - 2022-02-02 X: saurik messaged smartcontracts on Discord to make sure we checked the e-mail since he knew we had a prior problem where security advisories went to spam. - 2022-02-02 1650: Huddle begins in #security on Slack. diff --git a/docs/security-reviews/2024_10-Cannon-FGETFD-3DocSecurity.md b/docs/security-reviews/2024_10-Cannon-FGETFD-3DocSecurity.md new file mode 100644 index 00000000000..f157356e490 --- /dev/null +++ b/docs/security-reviews/2024_10-Cannon-FGETFD-3DocSecurity.md @@ -0,0 +1,114 @@ +# Audit Report - OP Cannon + +| | | +| -------------- | ------------------------------------------------------------------------- | +| **Audit Date** | Oct 2nd 2024 - Oct 3rd 2024 | +| **Auditor** | 3DOC Security ([@3docSec](https://x.com/3docSec)) | +| **Version 1** | Oct 3rd 2024. | + +
+ +# Contents +- [Audit Report - OP cannon](#audit-report---op-cannon) +- [Contents](#contents) +- [Disclaimer](#disclaimer) +- [About 3DOC](#about-3doc) +- [Scope](#scope) +- [Severity Classification](#severity-classification) +- [Summary](#summary) +- [Findings](#findings) + - [Low Risk findings (0)](#low-risk-findings-0) + - [1. Op-challenger Docker image does not include Cannon embeds](#false-positive-op-challenger-docker-image-does-not-include-cannon-embeds) + +# Disclaimer +_The following audit report is based on the information and code provided by the client, and any findings or recommendations are made solely on the basis of this information. While the Auditor has exercised due care and skill in conducting the audit, it cannot be guaranteed that all issues have been identified and that there are no undiscovered errors or vulnerabilities in the code._ + +_Furthermore, this report is not an endorsement or certification of the protocol, and the Auditor does not assume any responsibility for any losses or damages that may result from the use of the smart contracts, either in their current form or in any modified version thereof._ + +# About 3DOC +3DOC is a top ranked Smart Contract Auditor doing audits on Code4rena (www.code4rena.com), having ranked 1st in multiple contests in [solo](https://code4rena.com/@3docSec) and [team](https://code4rena.com/@RadiantLabs) audits, including the [Optimism superchain contest](https://code4rena.com/audits/2024-07-optimism-superchain) in July 2024.
+He can also be booked for conducting Private Audits. + +Contact:
+ +X: [@3DocSec](https://x.com/3DocSec) + +e-mail: [hello@3doc.fr](mailto:hello@3doc.fr) + +# Scope +The scope of the audit is the following Pull Request in the client's GitHub repository: + +https://github.com/ethereum-optimism/optimism/pull/12050 + +The change consists of a core update for supporting the `F_GETFD` syscall in the MIPS VM, [provided with this commit](https://github.com/ethereum-optimism/optimism/pull/12050/commits/7c8257d3574a2a76ab90f8129c7b532d68049944), and several additional updates accommodating the VM version bump that came with the core change. + +# Severity Classification +| Severity | Impact: High | Impact: Medium | Impact: Low | +| ---------------------- | ------------ | -------------- | ----------- | +| **Likelihood: High** | ![high] | ![high] | ![medium] | +| **Likelihood: Medium** | ![high] | ![medium] | ![low] | +| **Likelihood: Low** | ![medium] | ![low] | ![low] | + +**Impact** - the technical, economic and reputation damage of a successful attack + +**Likelihood** - the chance that a particular vulnerability is discovered and exploited + +# Summary + +| Severity | Total | +| -------------- | ----- | +| ![high] | 0 | +| ![medium] | 0 | +| ![low] | 0 | +| ![information] | 0 | + + +# Findings +## Low Risk findings (0) + +### [False positive] Op-challenger Docker image does not include Cannon embeds +#### Description +The change in scope added a new implementation of the Cannon VM, which was called `VersionSingleThreaded2`. Cannon has now three versions (`VersionSingleThreaded`, `VersionSingleThreaded2`, and `VersionMultiThreaded`). + +The op-challenger program makes use of the Cannon VM in several places via the configured `VmBin` path, which point to the `multicannon` command line. This one reads the State version from the input state and selects the right Cannon VM accordingly (`cannon/multicannon/exec.go:L81`). 
+ +If we look at the Docker challenger image generated by the `make golang-docker` command, however, we can see it doesn't contain an `embeds` folder: + +``` +docker run -t us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger find / -name embeds +``` + +But it however has the `cannon` command pointing to the `multicannon` multiplexer: + +``` +➜ optimism git:(52d0e60c1) ✗ docker run -t us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger cannon | head -2 +NAME: + multicannon - MIPS Fault Proof tool +➜ optimism git:(52d0e60c1) ✗ +``` + +This issue appears to be pre-existing to the changes in scope; using Docker images to run the challenger is [mentioned as option](https://docs.optimism.io/builders/chain-operators/tools/op-challenger), but only as alternative option, hence the Low risk assessed for this finding. + +#### Impact +Because of this issue, challenger instances operated in a Docker container won't be able to function properly. + +#### Recommendation +Consider modifying the Docker build chain to include the `embeds` folder. +Consider extending the current e2e test suite to cover execution from Docker images. + +#### Discussion + +> @inphi The cannon-2 implementation that supports go1.22 is now embedded into the cannon cli binary. Note that these embeds are not actual files that you can find in the docker container filesystem. But rather an embedded filesystem inside the Go binary - https://pkg.go.dev/embed. + +> @3DOC Oh yes I see that. 
So those are included in an embedded filesystem, I missed that + + +[high]: https://img.shields.io/badge/-HIGH-b02319 "HIGH" +[medium]: https://img.shields.io/badge/-MEDIUM-orange "MEDIUM" +[low]: https://img.shields.io/badge/-LOW-FFD700 "LOW" +[information]: https://img.shields.io/badge/-INFORMATION-darkgreen "INFORMATION" +[fixed]: https://img.shields.io/badge/-FIXED-brightgreen "FIXED" +[acknowledged]: https://img.shields.io/badge/-ACKNOWLEDGED-blue "ACKNOWLEDGED" +[disputed]: https://img.shields.io/badge/-DISPUTED-lightgrey "DISPUTED" +[reported]: https://img.shields.io/badge/-REPORTED-lightblue "REPORTED" +[partiallyfixed]: https://img.shields.io/badge/-PARTIALLY_FIXED-lightgreen "PARTIALLTY FIXED" diff --git a/docs/security-reviews/README.md b/docs/security-reviews/README.md index 483dc154185..265d0a65f90 100644 --- a/docs/security-reviews/README.md +++ b/docs/security-reviews/README.md @@ -6,7 +6,7 @@ Each review is focused on a different part of the codebase, and at a different p Please see the report for the specific details. 
| Date | Reviewer | Focus and Scope | Report Link | Commit | Subsequent Release | -| ------- | -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------- | ------------------- | +|---------|----------------------| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |------------------------------------------------------------------------------------------------------------------------------------------------------------| -------------------------------------------- |---------------------| | 2020-10 | Trail of Bits | Rollup | [2020_10-TrailOfBits.pdf](./2020_10-Rollup-TrailOfBits.pdf) | | | | 2020-11 | Dapphub | ECDSA Wallet | [2020_11-Dapphub-ECDSA_Wallet.pdf](./2020_11-Dapphub-ECDSA_Wallet.pdf) | | | | 2021-03 | OpenZeppelin | OVM and Rollup | [2021_03-OVM_and_Rollup-OpenZeppelin.pdf](./2021_03-OVM_and_Rollup-OpenZeppelin.pdf) | | | @@ -25,7 +25,8 @@ Please see the report for the specific details. 
| 2024-02 | Runtime Verification | Pausability | [Kontrol Verification][kontrol] | | | | 2024-02 | Cantina | MCP L1: `OptimismPortal.sol`, `L1CrossDomainMessenger.sol`, `L1StandardBridge.sol`, `L1ERC721Bridge.sol`, `OptimismMintableERC20Factory.sol`, `L2OutputOracle.sol`, `SystemConfig.sol` | [2024_02-MCP_L1-Cantina.pdf](./2024_02-MCP_L1-Cantina.pdf) | e6ef3a900c42c8722e72c2e2314027f85d12ced5 | op-contracts/v1.3.0 | | 2024-03 | Sherlock | Fault Proofs | Sherlock Optimism Fault Proofs Contest ([site](https://audits.sherlock.xyz/contests/205), [repo](https://github.com/sherlock-audit/2024-02-optimism-2024)) | | | -| 2024-08 | Cantina | Fault proof no-MIPS: All contracts in the `packages/contracts-bedrock/src/dispute` directory | [./2024_08_Fault-Proofs-MIPS_Cantina.pdf](./2024_08_Fault-Proofs-MIPS_Cantina.pdf) | 1f7081798ce2d49b8643514663d10681cb853a3d | op-contracts/v1.4.0 | -| 2024-08 | Spearbit | Fault proof MIPS: `MIPS.sol` | [./2024_08_Fault-Proofs-No-MIPS_Spearbit.pdf](./2024_08_Fault-Proofs-No-MIPS_Spearbit.pdf) | 71b93116738ee98c9f8713b1a5dfe626ce06c1b2 | op-contracts/v1.6.0 | +| 2024-08 | Cantina | Fault proof MIPS: `MIPS.sol` | [./2024_08_Fault-Proofs-MIPS_Cantina.pdf](./2024_08_Fault-Proofs-MIPS_Cantina.pdf) | 71b93116738ee98c9f8713b1a5dfe626ce06c1b2 | op-contracts/v1.4.0 | +| 2024-08 | Spearbit | Fault proof no-MIPS: All contracts in the `packages/contracts-bedrock/src/dispute` directory | [./2024_08_Fault-Proofs-No-MIPS_Spearbit.pdf](./2024_08_Fault-Proofs-No-MIPS_Spearbit.pdf) | 1f7081798ce2d49b8643514663d10681cb853a3d | op-contracts/v1.6.0 | +| 2024-10 | 3Doc Security | Fault proof MIPS: `MIPS.sol` | [./2024_10-Cannon-FGETFD-3DocSecurity.md](./2024_10-Cannon-FGETFD-3DocSecurity.md) | 52d0e60c16498ad4efec8798e3fc1b36b13f46a2 | op-contracts/v1.8.0 | [kontrol]: https://github.com/ethereum-optimism/optimism/blob/876e16ad04968f0bb641eb76f98eb77e7e1a3e16/packages/contracts-bedrock/test/kontrol/README.md diff --git a/go.mod b/go.mod index 
d1be77ddc0e..5454732a74a 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/crate-crypto/go-kzg-4844 v1.0.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 - github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241113154227-e72c6311f6e7 + github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241119111730-bee358f6d6e6 github.com/ethereum/go-ethereum v1.14.11 github.com/fsnotify/fsnotify v1.8.0 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb @@ -28,7 +28,7 @@ require ( github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ds-leveldb v0.5.0 github.com/klauspost/compress v1.17.11 - github.com/kurtosis-tech/kurtosis/api/golang v1.4.1 + github.com/kurtosis-tech/kurtosis/api/golang v1.4.2 github.com/libp2p/go-libp2p v0.36.2 github.com/libp2p/go-libp2p-mplex v0.9.0 github.com/libp2p/go-libp2p-pubsub v0.12.0 @@ -48,7 +48,7 @@ require ( github.com/urfave/cli/v2 v2.27.5 golang.org/x/crypto v0.28.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.9.0 golang.org/x/term v0.25.0 golang.org/x/time v0.7.0 ) @@ -250,7 +250,7 @@ require ( rsc.io/tmplfunc v0.0.3 // indirect ) -replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101411.2-rc.1 +replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101411.2-rc.2 //replace github.com/ethereum/go-ethereum => ../go-ethereum diff --git a/go.sum b/go.sum index 5eabf529b0d..60b90554c2e 100644 --- a/go.sum +++ b/go.sum @@ -187,10 +187,10 @@ github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/u github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod 
h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs= -github.com/ethereum-optimism/op-geth v1.101411.2-rc.1 h1:v314tR5EzG+QNE9aLf+goWCDsTT+RT2EsdOOlJT6CwM= -github.com/ethereum-optimism/op-geth v1.101411.2-rc.1/go.mod h1:RrPkuqfeIXkW28lQJwc5AG/BKbhkHRXPD5YezeeK4w8= -github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241113154227-e72c6311f6e7 h1:Mbgsp5T52F2pEULHccLr4NtnT6cKnJgabpAPlTfPxrk= -github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241113154227-e72c6311f6e7/go.mod h1:9feO8jcL5OZ1tvRjEfNAHz4Aggvd6373l+ZxmZZAyZs= +github.com/ethereum-optimism/op-geth v1.101411.2-rc.2 h1:3suWTU9DwBdY8Yy/ZgZLB/yBy3TwpntpkUn61mZgNpY= +github.com/ethereum-optimism/op-geth v1.101411.2-rc.2/go.mod h1:dITJzx1KXsV2KusscsktidEb00blTSyFhalq8CjfsUY= +github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241119111730-bee358f6d6e6 h1:+AIYWDX7FeWRLnBVqPiwireTacLLGGww1slGyv+YN0o= +github.com/ethereum-optimism/superchain-registry/superchain v0.0.0-20241119111730-bee358f6d6e6/go.mod h1:9feO8jcL5OZ1tvRjEfNAHz4Aggvd6373l+ZxmZZAyZs= github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA= github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 h1:8NfxH2iXvJ60YRB8ChToFTUzl8awsc3cJ8CbLjGIl/A= @@ -440,8 +440,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2 h1:izciXrFyFR+ihJ7nLTOkoIX5GzBPIp8gVKlw94gIc98= github.com/kurtosis-tech/kurtosis-portal/api/golang v0.0.0-20230818182330-1a86869414d2/go.mod h1:bWSMQK3WHVTGHX9CjxPAb/LtzcmfOxID2wdzakSWQxo= -github.com/kurtosis-tech/kurtosis/api/golang v1.4.1 h1:V/T5k7t1iKgFof1cGhyLh396YKdTehUqO97AsTPDy+k= -github.com/kurtosis-tech/kurtosis/api/golang v1.4.1/go.mod 
h1:9T22P7Vv3j5g6sbm78DxHQ4s9C4Cj3s9JjFQ7DFyYpM= +github.com/kurtosis-tech/kurtosis/api/golang v1.4.2 h1:x9jpXBGuLTWuILVUZWZtgDYY9amhyhzRVHxDFlYEJB4= +github.com/kurtosis-tech/kurtosis/api/golang v1.4.2/go.mod h1:9T22P7Vv3j5g6sbm78DxHQ4s9C4Cj3s9JjFQ7DFyYpM= github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b h1:hMoIM99QKcYQqsnK4AF7Lovi9ZD9ac6lZLZ5D/jx2x8= github.com/kurtosis-tech/kurtosis/contexts-config-store v0.0.0-20230818184218-f4e3e773463b/go.mod h1:4pFdrRwDz5R+Fov2ZuTaPhAVgjA2jhGh1Izf832sX7A= github.com/kurtosis-tech/kurtosis/grpc-file-transfer/golang v0.0.0-20230803130419-099ee7a4e3dc h1:7IlEpSehmWcNXOFpNP24Cu5HQI3af7GCBQw//m+LnvQ= @@ -927,8 +927,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/interop-devnet/docker-compose.yml b/interop-devnet/docker-compose.yml index de97c9967b6..c4cbab978a1 100644 --- a/interop-devnet/docker-compose.yml +++ b/interop-devnet/docker-compose.yml @@ -319,6 +319,7 @@ services: OP_BATCHER_METRICS_ENABLED: "true" 
OP_BATCHER_RPC_ENABLE_ADMIN: "true" OP_BATCHER_BATCH_TYPE: + OP_BATCHER_THROTTLE_INTERVAL: 0 # uncomment to use blobs # OP_BATCHER_DATA_AVAILABILITY_TYPE: blobs env_file: @@ -350,6 +351,7 @@ services: OP_BATCHER_METRICS_ENABLED: "true" OP_BATCHER_RPC_ENABLE_ADMIN: "true" OP_BATCHER_BATCH_TYPE: + OP_BATCHER_THROTTLE_INTERVAL: 0 # uncomment to use blobs # OP_BATCHER_DATA_AVAILABILITY_TYPE: blobs env_file: diff --git a/just/default.just b/just/default.just new file mode 100644 index 00000000000..220c27dca5c --- /dev/null +++ b/just/default.just @@ -0,0 +1,5 @@ +set shell := ["bash", "-c"] + +PARALLEL := num_cpus() + +MAP_JUST := "/usr/bin/env -S parallel --shebang --jobs " + PARALLEL + " --colsep ' ' -r " + just_executable() diff --git a/just/deprecated.mk b/just/deprecated.mk new file mode 100644 index 00000000000..a18d665c054 --- /dev/null +++ b/just/deprecated.mk @@ -0,0 +1,45 @@ +ifeq (, $(shell which tput)) + # CI environment typically does not support tput. + banner-style = $1 +else + # print in bold red to bring attention. + banner-style = $(shell tput bold)$(shell tput setaf 1)$1$(shell tput sgr0) +endif + +# Variable assignments can affect the semantics of the make targets. +# Typical use-case: setting VERSION in a release build, since CI +# doesn't preserve the git environment. +# +# We need to translate: +# "make target VAR=val" to "just VAR=val target" +# +# MAKEFLAGS is a string of the form: +# "abc --foo --bar=baz -- VAR1=val1 VAR2=val2", namely: +# - abc is the concatenation of all short flags +# - --foo and --bar=baz are long options, +# - -- is the separator between flags and variable assignments, +# - VAR1=val1 and VAR2=val2 are variable assignments +# +# Goal: ignore all CLI flags, keep only variable assignments. +# +# First remove the short flags at the beginning, or the first long-flag, +# or if there is no flag at all, the -- separator (which then makes the +# next step a noop). 
If there's no flag and no variable assignment, the +# result is empty anyway, so the wordlist call is safe (everything is a noop). +tmp-flags = $(wordlist 2,$(words $(MAKEFLAGS)),$(MAKEFLAGS)) +# Then remove all long options, including the -- separator, if needed. That +# leaves only variable assignments. +just-flags = $(patsubst --%,,$(tmp-flags)) + +define make-deprecated-target +$1: + @echo + @printf %s\\n '$(call banner-style,"make $1 $(just-flags)" is deprecated. Please use "just $(just-flags) $1" instead.)' + @echo + just $(just-flags) $1 +endef + +$(foreach element,$(DEPRECATED_TARGETS),$(eval $(call make-deprecated-target,$(element)))) + +.PHONY: + $(DEPRECATED_TARGETS) diff --git a/just/git.just b/just/git.just new file mode 100644 index 00000000000..922286d7ab5 --- /dev/null +++ b/just/git.just @@ -0,0 +1,26 @@ +import 'default.just' + +# Set default values for git info +GITCOMMIT := env('GITCOMMIT', `git rev-parse HEAD 2> /dev/null || true`) +GITDATE := env('GITDATE', `git show -s --format='%ct' 2> /dev/null|| true`) + +_PROJECT := shell("basename $1", justfile_directory()) + +_ALL_TAGS := shell("git tag --points-at $1 2> /dev/null || true", GITCOMMIT) + +_PROJECT_TAGS := shell("echo $1 | grep ^$2/ | sed s:$2/:: | sort -V", _ALL_TAGS, _PROJECT) + +_PREFERRED_TAG := shell("echo $1 | grep -v -- '-rc' | tail -n 1", _PROJECT_TAGS) + +_LAST_TAG := shell("echo $1 | tail -n 1", _PROJECT_TAGS) + +# Find version tag, prioritizing non-rc release tags +VERSION := shell('if [ -z "$1" ]; then + if [ -z "$2" ]; then + echo "untagged" + else + echo "$2" + fi +else + echo $1 +fi', _PREFERRED_TAG, _LAST_TAG) diff --git a/just/go.just b/just/go.just new file mode 100644 index 00000000000..5af76629509 --- /dev/null +++ b/just/go.just @@ -0,0 +1,27 @@ +import 'git.just' + +_EXTRALDFLAGS := if os() == "macos" { "-ldflags=-extldflags=-Wl,-ld_classic" } else { "" } + +# We use both GOOS/GOARCH and TARGETOS/TARGETARCH to set the build targets. 
+# From the usage patterns, it looks like TARGETOS/TARGETARCH should take +# precedence if set, and default to GOOS/GOARCH if not set. +# TODO: should we just remove TARGETOS/TARGETARCH altogether eventually? +GOOS := env('GOOS', `go env GOOS`) +GOARCH := env('GOARCH', `go env GOARCH`) +TARGETOS := env('TARGETOS', GOOS) +TARGETARCH := env('TARGETARCH', GOARCH) + +GORACE := "0" + +_GORACE_FLAG := if GORACE == "1" { "-race " } else { "" } + +[private] +go_build BIN PKG *FLAGS: + env GO111MODULE=on GOOS={{TARGETOS}} GOARCH={{TARGETARCH}} CGO_ENABLED=0 go build -v {{_GORACE_FLAG}} {{FLAGS}} -o {{BIN}} {{PKG}} + +[private] +go_test SELECTOR *FLAGS: + go test -v {{_GORACE_FLAG}} {{FLAGS}} {{SELECTOR}} + +[private] +go_fuzz FUZZ TIME='10s' PKG='': (go_test PKG _EXTRALDFLAGS "-fuzztime" TIME "-fuzz" FUZZ "-run" "NOTAREALTEST") diff --git a/justfile b/justfile index e40186f75df..6438ba36197 100644 --- a/justfile +++ b/justfile @@ -61,4 +61,4 @@ check-semgrep: [ "$(just print-semgrep)" = "$(jq -r .semgrep < versions.json)" ] && echo '✓ semgrep versions match' || (echo '✗ semgrep version mismatch. Run `just upgrade-semgrep` to upgrade.' && exit 1) upgrade-semgrep: - jq '.semgrep = $v' --arg v $(just print-semgrep) <<<$(cat versions.json) > versions.json + pip3 install semgrep=="$(jq -r .semgrep < versions.json)" diff --git a/op-alt-da/cli.go b/op-alt-da/cli.go index 30ce2168f57..84364e47952 100644 --- a/op-alt-da/cli.go +++ b/op-alt-da/cli.go @@ -57,22 +57,25 @@ func CLIFlags(envPrefix string, category string) []cli.Flag { Category: category, }, &cli.DurationFlag{ - Name: PutTimeoutFlagName, - Usage: "Timeout for put requests. 0 means no timeout.", - Value: time.Duration(0), - EnvVars: altDAEnvs(envPrefix, "PUT_TIMEOUT"), + Name: PutTimeoutFlagName, + Usage: "Timeout for put requests. 
0 means no timeout.", + Value: time.Duration(0), + EnvVars: altDAEnvs(envPrefix, "PUT_TIMEOUT"), + Category: category, }, &cli.DurationFlag{ - Name: GetTimeoutFlagName, - Usage: "Timeout for get requests. 0 means no timeout.", - Value: time.Duration(0), - EnvVars: altDAEnvs(envPrefix, "GET_TIMEOUT"), + Name: GetTimeoutFlagName, + Usage: "Timeout for get requests. 0 means no timeout.", + Value: time.Duration(0), + EnvVars: altDAEnvs(envPrefix, "GET_TIMEOUT"), + Category: category, }, &cli.Uint64Flag{ - Name: MaxConcurrentRequestsFlagName, - Usage: "Maximum number of concurrent requests to the DA server", - Value: 1, - EnvVars: altDAEnvs(envPrefix, "MAX_CONCURRENT_DA_REQUESTS"), + Name: MaxConcurrentRequestsFlagName, + Usage: "Maximum number of concurrent requests to the DA server", + Value: 1, + EnvVars: altDAEnvs(envPrefix, "MAX_CONCURRENT_DA_REQUESTS"), + Category: category, }, } } diff --git a/op-alt-da/cmd/daserver/main.go b/op-alt-da/cmd/daserver/main.go index 3ed37bd0532..bcaa9c47280 100644 --- a/op-alt-da/cmd/daserver/main.go +++ b/op-alt-da/cmd/daserver/main.go @@ -13,7 +13,7 @@ import ( oplog "github.com/ethereum-optimism/optimism/op-service/log" ) -var Version = "v0.0.1" +var Version = "v0.0.0" func main() { oplog.SetupDefaults() diff --git a/op-alt-da/commitment.go b/op-alt-da/commitment.go index cc5829ad4dc..a6fa5424665 100644 --- a/op-alt-da/commitment.go +++ b/op-alt-da/commitment.go @@ -108,7 +108,7 @@ func (c Keccak256Commitment) CommitmentType() CommitmentType { return Keccak256CommitmentType } -// Encode adds a commitment type prefix self describing the commitment. +// Encode adds a commitment type prefix that describes the commitment. func (c Keccak256Commitment) Encode() []byte { return append([]byte{byte(Keccak256CommitmentType)}, c...) 
} diff --git a/op-batcher/Makefile b/op-batcher/Makefile index 22bb7a61386..1501e0242d9 100644 --- a/op-batcher/Makefile +++ b/op-batcher/Makefile @@ -1,52 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -# Find the github tag that points to this commit. If none are found, set the version string to "untagged" -# Prioritizes release tag, if one exists, over tags suffixed with "-rc" -VERSION ?= $(shell tags=$$(git tag --points-at $(GITCOMMIT) | grep '^op-batcher/' | sed 's/op-batcher\///' | sort -V); \ - preferred_tag=$$(echo "$$tags" | grep -v -- '-rc' | tail -n 1); \ - if [ -z "$$preferred_tag" ]; then \ - if [ -z "$$tags" ]; then \ - echo "untagged"; \ - else \ - echo "$$tags" | tail -n 1; \ - fi \ - else \ - echo $$preferred_tag; \ - fi) +DEPRECATED_TARGETS := op-batcher clean test fuzz -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X main.Version=$(VERSION) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" - -# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 -ifeq ($(shell uname),Darwin) - FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic -endif - -op-batcher: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v $(LDFLAGS) -o ./bin/op-batcher ./cmd - -clean: - rm bin/op-batcher - -test: - go test -v ./... 
- -fuzz: - printf "%s\n" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelConfig_CheckTimeout ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationZero ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutMaxChannelDuration ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzDurationTimeoutZeroMaxChannelDuration ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelCloseTimeout ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzChannelZeroCloseTimeout ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzSeqWindowClose ./batcher" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime 10s -fuzz FuzzSeqWindowZeroTimeoutClose ./batcher" \ - | parallel -j 8 {} - -.PHONY: \ - op-batcher \ - clean \ - test \ - fuzz +include ../just/deprecated.mk diff --git a/op-batcher/architecture.png b/op-batcher/architecture.png new file mode 100644 index 00000000000..0eab940fbb5 Binary files /dev/null and b/op-batcher/architecture.png differ diff --git a/op-batcher/batcher/channel.go b/op-batcher/batcher/channel.go index dd0827d4686..95abcb46a7f 100644 --- a/op-batcher/batcher/channel.go +++ b/op-batcher/batcher/channel.go @@ -49,10 +49,10 @@ func newChannel(log log.Logger, metr metrics.Metricer, cfg ChannelConfig, rollup func (c *channel) TxFailed(id string) { if data, ok := c.pendingTransactions[id]; ok { c.log.Trace("marked transaction as failed", "id", id) - // Note: when the batcher is changed to send multiple frames per tx, - // this needs to be changed to iterate over all frames of the tx data - // and re-queue them. - c.channelBuilder.PushFrames(data.Frames()...) + // Rewind to the first frame of the failed tx + // -- the frames are ordered, and we want to send them + // all again. 
+ c.channelBuilder.RewindFrameCursor(data.Frames()[0]) delete(c.pendingTransactions, id) } else { c.log.Warn("unknown transaction marked as failed", "id", id) @@ -61,18 +61,16 @@ func (c *channel) TxFailed(id string) { c.metr.RecordBatchTxFailed() } -// TxConfirmed marks a transaction as confirmed on L1. Unfortunately even if all frames in -// a channel have been marked as confirmed on L1 the channel may be invalid & need to be -// resubmitted. -// This function may reset the pending channel if the pending channel has timed out. -func (c *channel) TxConfirmed(id string, inclusionBlock eth.BlockID) (bool, []*types.Block) { - c.metr.RecordBatchTxSubmitted() +// TxConfirmed marks a transaction as confirmed on L1. Returns a bool indicating +// whether the channel timed out on chain. +func (c *channel) TxConfirmed(id string, inclusionBlock eth.BlockID) bool { + c.metr.RecordBatchTxSuccess() c.log.Debug("marked transaction as confirmed", "id", id, "block", inclusionBlock) if _, ok := c.pendingTransactions[id]; !ok { c.log.Warn("unknown transaction marked as confirmed", "id", id, "block", inclusionBlock) // TODO: This can occur if we clear the channel while there are still pending transactions // We need to keep track of stale transactions instead - return false, nil + return false } delete(c.pendingTransactions, id) c.confirmedTransactions[id] = inclusionBlock @@ -82,21 +80,20 @@ func (c *channel) TxConfirmed(id string, inclusionBlock eth.BlockID) (bool, []*t c.minInclusionBlock = min(c.minInclusionBlock, inclusionBlock.Number) c.maxInclusionBlock = max(c.maxInclusionBlock, inclusionBlock.Number) + if c.isFullySubmitted() { + c.metr.RecordChannelFullySubmitted(c.ID()) + c.log.Info("Channel is fully submitted", "id", c.ID(), "min_inclusion_block", c.minInclusionBlock, "max_inclusion_block", c.maxInclusionBlock) + } + // If this channel timed out, put the pending blocks back into the local saved blocks // and then reset this state so it can try to build a new channel. 
if c.isTimedOut() { c.metr.RecordChannelTimedOut(c.ID()) c.log.Warn("Channel timed out", "id", c.ID(), "min_inclusion_block", c.minInclusionBlock, "max_inclusion_block", c.maxInclusionBlock) - return true, c.channelBuilder.Blocks() - } - // If we are done with this channel, record that. - if c.isFullySubmitted() { - c.metr.RecordChannelFullySubmitted(c.ID()) - c.log.Info("Channel is fully submitted", "id", c.ID(), "min_inclusion_block", c.minInclusionBlock, "max_inclusion_block", c.maxInclusionBlock) - return true, nil + return true } - return false, nil + return false } // Timeout returns the channel timeout L1 block number. If there is no timeout set, it returns 0. @@ -136,7 +133,7 @@ func (c *channel) ID() derive.ChannelID { func (c *channel) NextTxData() txData { nf := c.cfg.MaxFramesPerTx() txdata := txData{frames: make([]frameData, 0, nf), asBlob: c.cfg.UseBlobs} - for i := 0; i < nf && c.channelBuilder.HasFrame(); i++ { + for i := 0; i < nf && c.channelBuilder.HasPendingFrame(); i++ { frame := c.channelBuilder.NextFrame() txdata.frames = append(txdata.frames, frame) } @@ -151,7 +148,7 @@ func (c *channel) NextTxData() txData { func (c *channel) HasTxData() bool { if c.IsFull() || // If the channel is full, we should start to submit it !c.cfg.UseBlobs { // If using calldata, we only send one frame per tx - return c.channelBuilder.HasFrame() + return c.channelBuilder.HasPendingFrame() } // Collect enough frames if channel is not full yet return c.channelBuilder.PendingFrames() >= int(c.cfg.MaxFramesPerTx()) diff --git a/op-batcher/batcher/channel_builder.go b/op-batcher/batcher/channel_builder.go index ae1fb03d284..597b5ed3e14 100644 --- a/op-batcher/batcher/channel_builder.go +++ b/op-batcher/batcher/channel_builder.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" + 
"github.com/ethereum-optimism/optimism/op-service/queue" "github.com/ethereum/go-ethereum/core/types" ) @@ -65,7 +66,7 @@ type ChannelBuilder struct { // current channel co derive.ChannelOut // list of blocks in the channel. Saved in case the channel must be rebuilt - blocks []*types.Block + blocks queue.Queue[*types.Block] // latestL1Origin is the latest L1 origin of all the L2 blocks that have been added to the channel latestL1Origin eth.BlockID // oldestL1Origin is the oldest L1 origin of all the L2 blocks that have been added to the channel @@ -75,7 +76,12 @@ type ChannelBuilder struct { // oldestL2 is the oldest L2 block of all the L2 blocks that have been added to the channel oldestL2 eth.BlockID // frames data queue, to be send as txs - frames []frameData + frames queue.Queue[frameData] + // frameCursor tracks which frames in the queue were submitted + // frames[frameCursor] is the next unsubmitted (pending) frame + // frameCursor = len(frames) is reserved for when + // there are no pending (next unsubmitted) frames + frameCursor int // total frames counter numFrames int // total amount of output data of all frames created yet @@ -190,7 +196,7 @@ func (c *ChannelBuilder) AddBlock(block *types.Block) (*derive.L1BlockInfo, erro return l1info, fmt.Errorf("adding block to channel out: %w", err) } - c.blocks = append(c.blocks, block) + c.blocks.Enqueue(block) c.updateSwTimeout(l1info.Number) if l1info.Number > c.latestL1Origin.Number { @@ -312,11 +318,11 @@ func (c *ChannelBuilder) setFullErr(err error) { } // OutputFrames creates new frames with the channel out. It should be called -// after AddBlock and before iterating over available frames with HasFrame and +// after AddBlock and before iterating over pending frames with HasFrame and // NextFrame. // // If the channel isn't full yet, it will conservatively only -// pull readily available frames from the compression output. +// pull pending frames from the compression output. 
// If it is full, the channel is closed and all remaining // frames will be created, possibly with a small leftover frame. func (c *ChannelBuilder) OutputFrames() error { @@ -387,7 +393,7 @@ func (c *ChannelBuilder) outputFrame() error { id: frameID{chID: c.co.ID(), frameNumber: fn}, data: buf.Bytes(), } - c.frames = append(c.frames, frame) + c.frames.Enqueue(frame) c.numFrames++ c.outputBytes += len(frame.data) return err // possibly io.EOF (last frame) @@ -402,46 +408,47 @@ func (c *ChannelBuilder) Close() { } // TotalFrames returns the total number of frames that were created in this channel so far. -// It does not decrease when the frames queue is being emptied. func (c *ChannelBuilder) TotalFrames() int { return c.numFrames } -// HasFrame returns whether there's any available frame. If true, it can be -// popped using NextFrame(). +// HasPendingFrame returns whether there's any pending frame. If true, it can be +// dequeued using NextFrame(). // // Call OutputFrames before to create new frames from the channel out // compression pipeline. -func (c *ChannelBuilder) HasFrame() bool { - return len(c.frames) > 0 +func (c *ChannelBuilder) HasPendingFrame() bool { + return c.frameCursor < c.frames.Len() } // PendingFrames returns the number of pending frames in the frames queue. -// It is larger zero iff HasFrame() returns true. +// It is larger than zero iff HasFrame() returns true. func (c *ChannelBuilder) PendingFrames() int { - return len(c.frames) + return c.frames.Len() - c.frameCursor } -// NextFrame dequeues the next available frame. -// HasFrame must be called prior to check if there's a next frame available. +// NextFrame returns the next pending frame and increments the frameCursor +// HasFrame must be called prior to check if there a next pending frame exists. // Panics if called when there's no next frame. 
func (c *ChannelBuilder) NextFrame() frameData { - if len(c.frames) == 0 { + if len(c.frames) <= c.frameCursor { panic("no next frame") } - - f := c.frames[0] - c.frames = c.frames[1:] + f := c.frames[c.frameCursor] + c.frameCursor++ return f } -// PushFrames adds the frames back to the internal frames queue. Panics if not of -// the same channel. -func (c *ChannelBuilder) PushFrames(frames ...frameData) { - for _, f := range frames { - if f.id.chID != c.ID() { - panic("wrong channel") - } - c.frames = append(c.frames, f) +// RewindFrameCursor moves the frameCursor to point at the supplied frame +// only if it is ahead of it. +// Panics if the frame is not in this channel. +func (c *ChannelBuilder) RewindFrameCursor(frame frameData) { + if c.frames.Len() <= int(frame.id.frameNumber) || + len(c.frames[frame.id.frameNumber].data) != len(frame.data) || + c.frames[frame.id.frameNumber].id.chID != frame.id.chID { + panic("cannot rewind to unknown frame") + } + if c.frameCursor > int(frame.id.frameNumber) { + c.frameCursor = int(frame.id.frameNumber) } } diff --git a/op-batcher/batcher/channel_builder_test.go b/op-batcher/batcher/channel_builder_test.go index 957f9ae5973..6994186b7f0 100644 --- a/op-batcher/batcher/channel_builder_test.go +++ b/op-batcher/batcher/channel_builder_test.go @@ -299,6 +299,7 @@ func TestChannelBuilderBatchType(t *testing.T) { {"ChannelBuilder_PendingFrames_TotalFrames", ChannelBuilder_PendingFrames_TotalFrames}, {"ChannelBuilder_InputBytes", ChannelBuilder_InputBytes}, {"ChannelBuilder_OutputBytes", ChannelBuilder_OutputBytes}, + {"ChannelBuilder_OutputWrongFramePanic", ChannelBuilder_OutputWrongFramePanic}, } for _, test := range tests { test := test @@ -340,7 +341,7 @@ func TestChannelBuilder_NextFrame(t *testing.T) { }, data: expectedBytes, } - cb.PushFrames(frameData) + cb.frames = append(cb.frames, frameData) // There should only be 1 frame in the channel builder require.Equal(t, 1, cb.PendingFrames()) @@ -355,7 +356,7 @@ func 
TestChannelBuilder_NextFrame(t *testing.T) { require.PanicsWithValue(t, "no next frame", func() { cb.NextFrame() }) } -// TestChannelBuilder_OutputWrongFramePanic tests that a panic is thrown when a frame is pushed with an invalid frame id +// TestChannelBuilder_OutputWrongFramePanic tests that a panic is thrown when we try to rewind the cursor with an invalid frame id func ChannelBuilder_OutputWrongFramePanic(t *testing.T, batchType uint) { channelConfig := defaultTestChannelConfig() channelConfig.BatchType = batchType @@ -377,7 +378,7 @@ func ChannelBuilder_OutputWrongFramePanic(t *testing.T, batchType uint) { // The frame push should panic since we constructed a new channel out // so the channel out id won't match - require.PanicsWithValue(t, "wrong channel", func() { + require.PanicsWithValue(t, "cannot rewind to unknown frame", func() { frame := frameData{ id: frameID{ chID: co.ID(), @@ -385,7 +386,7 @@ func ChannelBuilder_OutputWrongFramePanic(t *testing.T, batchType uint) { }, data: buf.Bytes(), } - cb.PushFrames(frame) + cb.RewindFrameCursor(frame) }) } @@ -625,11 +626,11 @@ func TestChannelBuilder_FullShadowCompressor(t *testing.T) { require.NoError(cb.OutputFrames()) - require.True(cb.HasFrame()) + require.True(cb.HasPendingFrame()) f := cb.NextFrame() require.Less(len(f.data), int(cfg.MaxFrameSize)) // would fail without fix, full frame - require.False(cb.HasFrame(), "no leftover frame expected") // would fail without fix + require.False(cb.HasPendingFrame(), "no leftover frame expected") // would fail without fix } func ChannelBuilder_AddBlock(t *testing.T, batchType uint) { @@ -656,8 +657,8 @@ func ChannelBuilder_AddBlock(t *testing.T, batchType uint) { expectedInputBytes = 47 } require.Equal(t, expectedInputBytes, cb.co.InputBytes()) - require.Equal(t, 1, len(cb.blocks)) - require.Equal(t, 0, len(cb.frames)) + require.Equal(t, 1, cb.blocks.Len()) + require.Equal(t, 0, cb.frames.Len()) require.True(t, cb.IsFull()) // Since the channel output is full, 
the next call to AddBlock @@ -858,7 +859,7 @@ func ChannelBuilder_PendingFrames_TotalFrames(t *testing.T, batchType uint) { // empty queue for pf := nf - 1; pf >= 0; pf-- { - require.True(cb.HasFrame()) + require.True(cb.HasPendingFrame()) _ = cb.NextFrame() require.Equal(cb.PendingFrames(), pf) require.Equal(cb.TotalFrames(), nf) @@ -932,7 +933,7 @@ func ChannelBuilder_OutputBytes(t *testing.T, batchType uint) { require.Greater(cb.PendingFrames(), 1) var flen int - for cb.HasFrame() { + for cb.HasPendingFrame() { f := cb.NextFrame() flen += len(f.data) } diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 06403645ea4..81ee0fb35a5 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -39,8 +39,13 @@ type channelManager struct { // All blocks since the last request for new tx data. blocks queue.Queue[*types.Block] - // The latest L1 block from all the L2 blocks in the most recently closed channel - l1OriginLastClosedChannel eth.BlockID + // blockCursor is an index into blocks queue. It points at the next block + // to build a channel with. blockCursor = len(blocks) is reserved for when + // there are no blocks ready to build with. + blockCursor int + // The latest L1 block from all the L2 blocks in the most recently submitted channel. + // Used to track channel duration timeouts. 
+ l1OriginLastSubmittedChannel eth.BlockID // The default ChannelConfig to use for the next channel defaultCfg ChannelConfig // last block hash - for reorg detection @@ -52,9 +57,6 @@ type channelManager struct { channelQueue []*channel // used to lookup channels by tx ID upon tx success / failure txChannels map[string]*channel - - // if set to true, prevents production of any new channel frames - closed bool } func NewChannelManager(log log.Logger, metr metrics.Metricer, cfgProvider ChannelConfigProvider, rollupCfg *rollup.Config) *channelManager { @@ -75,19 +77,23 @@ func (s *channelManager) SetChannelOutFactory(outFactory ChannelOutFactory) { // Clear clears the entire state of the channel manager. // It is intended to be used before launching op-batcher and after an L2 reorg. -func (s *channelManager) Clear(l1OriginLastClosedChannel eth.BlockID) { +func (s *channelManager) Clear(l1OriginLastSubmittedChannel eth.BlockID) { s.mu.Lock() defer s.mu.Unlock() s.log.Trace("clearing channel manager state") s.blocks.Clear() - s.l1OriginLastClosedChannel = l1OriginLastClosedChannel + s.blockCursor = 0 + s.l1OriginLastSubmittedChannel = l1OriginLastSubmittedChannel s.tip = common.Hash{} - s.closed = false s.currentChannel = nil s.channelQueue = nil s.txChannels = make(map[string]*channel) } +func (s *channelManager) pendingBlocks() int { + return s.blocks.Len() - s.blockCursor +} + // TxFailed records a transaction as failed. It will attempt to resubmit the data // in the failed transaction. 
func (s *channelManager) TxFailed(_id txID) { @@ -97,34 +103,21 @@ func (s *channelManager) TxFailed(_id txID) { if channel, ok := s.txChannels[id]; ok { delete(s.txChannels, id) channel.TxFailed(id) - if s.closed && channel.NoneSubmitted() { - s.log.Info("Channel has no submitted transactions, clearing for shutdown", "chID", channel.ID()) - s.removePendingChannel(channel) - } } else { s.log.Warn("transaction from unknown channel marked as failed", "id", id) } } -// TxConfirmed marks a transaction as confirmed on L1. Unfortunately even if all frames in -// a channel have been marked as confirmed on L1 the channel may be invalid & need to be -// resubmitted. -// This function may reset the pending channel if the pending channel has timed out. +// TxConfirmed marks a transaction as confirmed on L1. Only if the channel timed out +// the channelManager's state is modified. func (s *channelManager) TxConfirmed(_id txID, inclusionBlock eth.BlockID) { s.mu.Lock() defer s.mu.Unlock() id := _id.String() if channel, ok := s.txChannels[id]; ok { delete(s.txChannels, id) - done, blocksToRequeue := channel.TxConfirmed(id, inclusionBlock) - if done { - s.removePendingChannel(channel) - if len(blocksToRequeue) > 0 { - s.blocks.Prepend(blocksToRequeue...) - } - for _, b := range blocksToRequeue { - s.metr.RecordL2BlockInPendingQueue(b) - } + if timedOut := channel.TxConfirmed(id, inclusionBlock); timedOut { + s.handleChannelInvalidated(channel) } } else { s.log.Warn("transaction from unknown channel marked as confirmed", "id", id) @@ -133,23 +126,48 @@ func (s *channelManager) TxConfirmed(_id txID, inclusionBlock eth.BlockID) { s.log.Debug("marked transaction as confirmed", "id", id, "block", inclusionBlock) } -// removePendingChannel removes the given completed channel from the manager's state. 
-func (s *channelManager) removePendingChannel(channel *channel) { - if s.currentChannel == channel { - s.currentChannel = nil +// rewindToBlock updates the blockCursor to point at +// the block with the supplied hash, only if that block exists +// in the block queue and the blockCursor is ahead of it. +// Panics if the block is not in state. +func (s *channelManager) rewindToBlock(block eth.BlockID) { + idx := block.Number - s.blocks[0].Number().Uint64() + if s.blocks[idx].Hash() == block.Hash && idx < uint64(s.blockCursor) { + s.blockCursor = int(idx) + } else { + panic("tried to rewind to nonexistent block") } - index := -1 - for i, c := range s.channelQueue { - if c == channel { - index = i - break +} + +// handleChannelInvalidated rewinds the channelManager's blockCursor +// to point at the first block added to the provided channel, +// and removes the channel from the channelQueue, along with +// any channels which are newer than the provided channel. +func (s *channelManager) handleChannelInvalidated(c *channel) { + if len(c.channelBuilder.blocks) > 0 { + // This is usually true, but there is an edge case + // where a channel timed out before any blocks got added. + // In that case we end up with an empty frame (header only), + // and there are no blocks to requeue. + blockID := eth.ToBlockID(c.channelBuilder.blocks[0]) + for _, block := range c.channelBuilder.blocks { + s.metr.RecordL2BlockInPendingQueue(block) } + s.rewindToBlock(blockID) + } else { + s.log.Debug("channelManager.handleChanneInvalidated: channel had no blocks") } - if index < 0 { - s.log.Warn("channel not found in channel queue", "id", channel.ID()) - return + + // Trim provided channel and any older channels: + for i := range s.channelQueue { + if s.channelQueue[i] == c { + s.channelQueue = s.channelQueue[:i] + break + } } - s.channelQueue = append(s.channelQueue[:index], s.channelQueue[index+1:]...) + + // We want to start writing to a new channel, so reset currentChannel. 
+ s.currentChannel = nil } // nextTxData dequeues frames from the channel and returns them encoded in a transaction. @@ -160,6 +178,12 @@ func (s *channelManager) nextTxData(channel *channel) (txData, error) { return txData{}, io.EOF // TODO: not enough data error instead } tx := channel.NextTxData() + + // update s.l1OriginLastSubmittedChannel so that the next + // channel's duration timeout will trigger properly + if channel.LatestL1Origin().Number > s.l1OriginLastSubmittedChannel.Number { + s.l1OriginLastSubmittedChannel = channel.LatestL1Origin() + } s.txChannels[tx.ID().String()] = channel return tx, nil } @@ -200,7 +224,16 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) { s.log.Info("Recomputing optimal ChannelConfig: changing DA type and requeing blocks...", "useBlobsBefore", s.defaultCfg.UseBlobs, "useBlobsAfter", newCfg.UseBlobs) - s.Requeue(newCfg) + + // Invalidate the channel so its blocks + // get requeued: + s.handleChannelInvalidated(channel) + + // Set the defaultCfg so new channels + // pick up the new ChannelConfig + s.defaultCfg = newCfg + + // Try again to get data to send on chain. 
channel, err = s.getReadyChannel(l1Head) if err != nil { return emptyTxData, err @@ -231,14 +264,9 @@ func (s *channelManager) getReadyChannel(l1Head eth.BlockID) (*channel, error) { return firstWithTxData, nil } - if s.closed { - return nil, io.EOF - } - // No pending tx data, so we have to add new blocks to the channel - // If we have no saved blocks, we will not be able to create valid frames - if s.blocks.Len() == 0 { + if s.pendingBlocks() == 0 { return nil, io.EOF } @@ -284,7 +312,7 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error { return fmt.Errorf("creating channel out: %w", err) } - pc := newChannel(s.log, s.metr, cfg, s.rollupCfg, s.l1OriginLastClosedChannel.Number, channelOut) + pc := newChannel(s.log, s.metr, cfg, s.rollupCfg, s.l1OriginLastSubmittedChannel.Number, channelOut) s.currentChannel = pc s.channelQueue = append(s.channelQueue, pc) @@ -292,8 +320,8 @@ func (s *channelManager) ensureChannelWithSpace(l1Head eth.BlockID) error { s.log.Info("Created channel", "id", pc.ID(), "l1Head", l1Head, - "l1OriginLastClosedChannel", s.l1OriginLastClosedChannel, - "blocks_pending", s.blocks.Len(), + "blocks_pending", s.pendingBlocks(), + "l1OriginLastSubmittedChannel", s.l1OriginLastSubmittedChannel, "batch_type", cfg.BatchType, "compression_algo", cfg.CompressorConfig.CompressionAlgo, "target_num_frames", cfg.TargetNumFrames, @@ -324,7 +352,7 @@ func (s *channelManager) processBlocks() error { latestL2ref eth.L2BlockRef ) - for i := 0; ; i++ { + for i := s.blockCursor; ; i++ { block, ok := s.blocks.PeekN(i) if !ok { break @@ -348,7 +376,7 @@ func (s *channelManager) processBlocks() error { } } - _, _ = s.blocks.DequeueN(blocksAdded) + s.blockCursor += blocksAdded s.metr.RecordL2BlocksAdded(latestL2ref, blocksAdded, @@ -357,7 +385,7 @@ func (s *channelManager) processBlocks() error { s.currentChannel.ReadyBytes()) s.log.Debug("Added blocks to channel", "blocks_added", blocksAdded, - "blocks_pending", s.blocks.Len(), + 
"blocks_pending", s.pendingBlocks(), "channel_full", s.currentChannel.IsFull(), "input_bytes", s.currentChannel.InputBytes(), "ready_bytes", s.currentChannel.ReadyBytes(), @@ -374,15 +402,10 @@ func (s *channelManager) outputFrames() error { return nil } - lastClosedL1Origin := s.currentChannel.LatestL1Origin() - if lastClosedL1Origin.Number > s.l1OriginLastClosedChannel.Number { - s.l1OriginLastClosedChannel = lastClosedL1Origin - } - inBytes, outBytes := s.currentChannel.InputBytes(), s.currentChannel.OutputBytes() s.metr.RecordChannelClosed( s.currentChannel.ID(), - s.blocks.Len(), + s.pendingBlocks(), s.currentChannel.TotalFrames(), inBytes, outBytes, @@ -396,17 +419,16 @@ func (s *channelManager) outputFrames() error { s.log.Info("Channel closed", "id", s.currentChannel.ID(), - "blocks_pending", s.blocks.Len(), + "blocks_pending", s.pendingBlocks(), "num_frames", s.currentChannel.TotalFrames(), "input_bytes", inBytes, "output_bytes", outBytes, "oldest_l1_origin", s.currentChannel.OldestL1Origin(), - "l1_origin", lastClosedL1Origin, + "l1_origin", s.currentChannel.LatestL1Origin(), "oldest_l2", s.currentChannel.OldestL2(), "latest_l2", s.currentChannel.LatestL2(), "full_reason", s.currentChannel.FullErr(), "compr_ratio", comprRatio, - "latest_l1_origin", s.l1OriginLastClosedChannel, ) return nil } @@ -442,83 +464,77 @@ func l2BlockRefFromBlockAndL1Info(block *types.Block, l1info *derive.L1BlockInfo var ErrPendingAfterClose = errors.New("pending channels remain after closing channel-manager") -// Close clears any pending channels that are not in-flight already, to leave a clean derivation state. -// Close then marks the remaining current open channel, if any, as "full" so it can be submitted as well. -// Close does NOT immediately output frames for the current remaining channel: -// as this might error, due to limitations on a single channel. 
-// Instead, this is part of the pending-channel submission work: after closing, -// the caller SHOULD drain pending channels by generating TxData repeatedly until there is none left (io.EOF). -// A ErrPendingAfterClose error will be returned if there are any remaining pending channels to submit. -func (s *channelManager) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - if s.closed { - return nil +// pruneSafeBlocks dequeues blocks from the internal blocks queue +// if they have now become safe. +func (s *channelManager) pruneSafeBlocks(newSafeHead eth.L2BlockRef) { + oldestBlock, ok := s.blocks.Peek() + if !ok { + // no blocks to prune + return } - s.closed = true - s.log.Info("Channel manager is closing") - - // Any pending state can be proactively cleared if there are no submitted transactions - for _, ch := range s.channelQueue { - if ch.NoneSubmitted() { - s.log.Info("Channel has no past or pending submission - dropping", "id", ch.ID()) - s.removePendingChannel(ch) - } else { - s.log.Info("Channel is in-flight and will need to be submitted after close", "id", ch.ID(), "confirmed", len(ch.confirmedTransactions), "pending", len(ch.pendingTransactions)) - } + if newSafeHead.Number+1 == oldestBlock.NumberU64() { + // no blocks to prune + return } - s.log.Info("Reviewed all pending channels on close", "remaining", len(s.channelQueue)) - if s.currentChannel == nil { - return nil + if newSafeHead.Number+1 < oldestBlock.NumberU64() { + // This could happen if there was an L1 reorg. + s.log.Warn("safe head reversed, clearing channel manager state", + "oldestBlock", eth.ToBlockID(oldestBlock), + "newSafeBlock", newSafeHead) + // We should restart work from the new safe head, + // and therefore prune all the blocks. + s.Clear(newSafeHead.L1Origin) + return } - // If the channel is already full, we don't need to close it or output frames. - // This would already have happened in TxData. 
- if !s.currentChannel.IsFull() { - // Force-close the remaining open channel early (if not already closed): - // it will be marked as "full" due to service termination. - s.currentChannel.Close() + numBlocksToDequeue := newSafeHead.Number + 1 - oldestBlock.NumberU64() - // Final outputFrames call in case there was unflushed data in the compressor. - if err := s.outputFrames(); err != nil { - return fmt.Errorf("outputting frames during close: %w", err) - } + if numBlocksToDequeue > uint64(s.blocks.Len()) { + // This could happen if the batcher restarted. + // The sequencer may have derived the safe chain + // from channels sent by a previous batcher instance. + s.log.Warn("safe head above unsafe head, clearing channel manager state", + "unsafeBlock", eth.ToBlockID(s.blocks[s.blocks.Len()-1]), + "newSafeBlock", newSafeHead) + // We should restart work from the new safe head, + // and therefore prune all the blocks. + s.Clear(newSafeHead.L1Origin) + return } - if s.currentChannel.HasTxData() { - // Make it clear to the caller that there is remaining pending work. - return ErrPendingAfterClose + if s.blocks[numBlocksToDequeue-1].Hash() != newSafeHead.Hash { + s.log.Warn("safe chain reorg, clearing channel manager state", + "existingBlock", eth.ToBlockID(s.blocks[numBlocksToDequeue-1]), + "newSafeBlock", newSafeHead) + // We should restart work from the new safe head, + // and therefore prune all the blocks. + s.Clear(newSafeHead.L1Origin) + return } - return nil -} -// Requeue rebuilds the channel manager state by -// rewinding blocks back from the channel queue, and setting the defaultCfg. -func (s *channelManager) Requeue(newCfg ChannelConfig) { - newChannelQueue := []*channel{} - blocksToRequeue := []*types.Block{} - for _, channel := range s.channelQueue { - if !channel.NoneSubmitted() { - newChannelQueue = append(newChannelQueue, channel) - continue - } - blocksToRequeue = append(blocksToRequeue, channel.channelBuilder.Blocks()...) 
- } + // This shouldn't return an error because + // We already checked numBlocksToDequeue <= s.blocks.Len() + _, _ = s.blocks.DequeueN(int(numBlocksToDequeue)) + s.blockCursor -= int(numBlocksToDequeue) - // We put the blocks back at the front of the queue: - s.blocks.Prepend(blocksToRequeue...) - for _, b := range blocksToRequeue { - s.metr.RecordL2BlockInPendingQueue(b) + if s.blockCursor < 0 { + panic("negative blockCursor") } +} - // Channels which where already being submitted are put back - s.channelQueue = newChannelQueue - s.currentChannel = nil - // Setting the defaultCfg will cause new channels - // to pick up the new ChannelConfig - s.defaultCfg = newCfg +// pruneChannels dequeues channels from the internal channels queue +// if they were built using blocks which are now safe +func (s *channelManager) pruneChannels(newSafeHead eth.L2BlockRef) { + i := 0 + for _, ch := range s.channelQueue { + if ch.LatestL2().Number > newSafeHead.Number { + break + } + i++ + } + s.channelQueue = s.channelQueue[i:] } // PendingDABytes returns the current number of bytes pending to be written to the DA layer (from blocks fetched from L2 @@ -533,3 +549,19 @@ func (s *channelManager) PendingDABytes() int64 { } return int64(f) } + +// CheckExpectedProgress uses the supplied syncStatus to infer +// whether the node providing the status has made the expected +// safe head progress given fully submitted channels held in +// state. 
+func (m *channelManager) CheckExpectedProgress(syncStatus eth.SyncStatus) error { + for _, ch := range m.channelQueue { + if ch.isFullySubmitted() && // This implies a number of l1 confirmations has passed, depending on how the txmgr was configured + !ch.isTimedOut() && + syncStatus.CurrentL1.Number > ch.maxInclusionBlock && + syncStatus.SafeL2.Number < ch.LatestL2().Number { + return errors.New("safe head did not make expected progress") + } + } + return nil +} diff --git a/op-batcher/batcher/channel_manager_test.go b/op-batcher/batcher/channel_manager_test.go index 8dcb0745c16..32aae1b06dd 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -40,10 +40,6 @@ func TestChannelManagerBatchType(t *testing.T) { {"ChannelManagerReturnsErrReorgWhenDrained", ChannelManagerReturnsErrReorgWhenDrained}, {"ChannelManager_Clear", ChannelManager_Clear}, {"ChannelManager_TxResend", ChannelManager_TxResend}, - {"ChannelManagerCloseBeforeFirstUse", ChannelManagerCloseBeforeFirstUse}, - {"ChannelManagerCloseNoPendingChannel", ChannelManagerCloseNoPendingChannel}, - {"ChannelManagerClosePendingChannel", ChannelManagerClosePendingChannel}, - {"ChannelManagerCloseAllTxsFailed", ChannelManagerCloseAllTxsFailed}, } for _, test := range tests { test := test @@ -130,7 +126,7 @@ func ChannelManager_Clear(t *testing.T, batchType uint) { // Channel Manager state should be empty by default require.Empty(m.blocks) - require.Equal(eth.BlockID{}, m.l1OriginLastClosedChannel) + require.Equal(eth.BlockID{}, m.l1OriginLastSubmittedChannel) require.Equal(common.Hash{}, m.tip) require.Nil(m.currentChannel) require.Empty(m.channelQueue) @@ -154,15 +150,14 @@ func ChannelManager_Clear(t *testing.T, batchType uint) { // Process the blocks // We should have a pending channel with 1 frame - // and no more blocks since processBlocks consumes - // the list + require.NoError(m.processBlocks()) require.NoError(m.currentChannel.channelBuilder.co.Flush()) 
require.NoError(m.outputFrames()) _, err := m.nextTxData(m.currentChannel) require.NoError(err) - require.NotNil(m.l1OriginLastClosedChannel) - require.Len(m.blocks, 0) + require.Equal(m.blockCursor, len(m.blocks)) + require.NotNil(m.l1OriginLastSubmittedChannel) require.Equal(newL1Tip, m.tip) require.Len(m.currentChannel.pendingTransactions, 1) @@ -173,7 +168,7 @@ func ChannelManager_Clear(t *testing.T, batchType uint) { ParentHash: a.Hash(), }, nil, nil, nil) require.NoError(m.AddL2Block(b)) - require.Len(m.blocks, 1) + require.Equal(m.blockCursor, len(m.blocks)-1) require.Equal(b.Hash(), m.tip) safeL1Origin := eth.BlockID{ @@ -184,7 +179,7 @@ func ChannelManager_Clear(t *testing.T, batchType uint) { // Check that the entire channel manager state cleared require.Empty(m.blocks) - require.Equal(uint64(123), m.l1OriginLastClosedChannel.Number) + require.Equal(uint64(123), m.l1OriginLastSubmittedChannel.Number) require.Equal(common.Hash{}, m.tip) require.Nil(m.currentChannel) require.Empty(m.channelQueue) @@ -228,220 +223,6 @@ func ChannelManager_TxResend(t *testing.T, batchType uint) { require.Len(fs, 1) } -// ChannelManagerCloseBeforeFirstUse ensures that the channel manager -// will not produce any frames if closed immediately. 
-func ChannelManagerCloseBeforeFirstUse(t *testing.T, batchType uint) { - require := require.New(t) - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - log := testlog.Logger(t, log.LevelCrit) - m := NewChannelManager(log, metrics.NoopMetrics, - channelManagerTestConfig(10000, batchType), - defaultTestRollupConfig, - ) - m.Clear(eth.BlockID{}) - - a := derivetest.RandomL2BlockWithChainId(rng, 4, defaultTestRollupConfig.L2ChainID) - - require.NoError(m.Close(), "Expected to close channel manager gracefully") - - err := m.AddL2Block(a) - require.NoError(err, "Failed to add L2 block") - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected closed channel manager to contain no tx data") -} - -// ChannelManagerCloseNoPendingChannel ensures that the channel manager -// can gracefully close with no pending channels, and will not emit any new -// channel frames. -func ChannelManagerCloseNoPendingChannel(t *testing.T, batchType uint) { - require := require.New(t) - log := testlog.Logger(t, log.LevelCrit) - cfg := channelManagerTestConfig(10000, batchType) - cfg.CompressorConfig.TargetOutputSize = 1 // full on first block - cfg.ChannelTimeout = 1000 - m := NewChannelManager(log, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.Clear(eth.BlockID{}) - a := newMiniL2Block(0) - b := newMiniL2BlockWithNumberParent(0, big.NewInt(1), a.Hash()) - - err := m.AddL2Block(a) - require.NoError(err, "Failed to add L2 block") - - txdata, err := m.TxData(eth.BlockID{}) - require.NoError(err, "Expected channel manager to return valid tx data") - - m.TxConfirmed(txdata.ID(), eth.BlockID{}) - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected channel manager to EOF") - - require.NoError(m.Close(), "Expected to close channel manager gracefully") - - err = m.AddL2Block(b) - require.NoError(err, "Failed to add L2 block") - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected closed channel manager to return no new 
tx data") -} - -// ChannelManagerClosePendingChannel ensures that the channel manager -// can gracefully close with a pending channel, and will not produce any -// new channel frames after this point. -func ChannelManagerClosePendingChannel(t *testing.T, batchType uint) { - require := require.New(t) - // The number of batch txs depends on compression of the random data, hence the static test RNG seed. - // Example of different RNG seed that creates less than 2 frames: 1698700588902821588 - rng := rand.New(rand.NewSource(123)) - log := testlog.Logger(t, log.LevelError) - cfg := channelManagerTestConfig(10_000, batchType) - cfg.ChannelTimeout = 1000 - m := NewChannelManager(log, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.Clear(eth.BlockID{}) - - numTx := 20 // Adjust number of txs to make 2 frames - a := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - - err := m.AddL2Block(a) - require.NoError(err, "Failed to add L2 block") - - txdata, err := m.TxData(eth.BlockID{}) - require.NoError(err, "Expected channel manager to produce valid tx data") - log.Info("generated first tx data", "len", txdata.Len()) - - m.TxConfirmed(txdata.ID(), eth.BlockID{}) - - require.ErrorIs(m.Close(), ErrPendingAfterClose, "Expected channel manager to error on close because of pending tx data") - - txdata, err = m.TxData(eth.BlockID{}) - require.NoError(err, "Expected channel manager to produce tx data from remaining L2 block data") - log.Info("generated more tx data", "len", txdata.Len()) - - m.TxConfirmed(txdata.ID(), eth.BlockID{}) - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected channel manager to have no more tx data") - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data") -} - -// ChannelManager_Close_PartiallyPendingChannel ensures that the channel manager -// can gracefully close with a pending channel, where a block is still waiting 
-// inside the compressor to be flushed. -// -// This test runs only for singular batches on purpose. -// The SpanChannelOut writes full span batches to the compressor for -// every new block that's added, so NonCompressor cannot be used to -// set up a scenario where data is only partially flushed. -// Couldn't get the test to work even with modifying NonCompressor -// to flush half-way through writing to the compressor... -func TestChannelManager_Close_PartiallyPendingChannel(t *testing.T) { - require := require.New(t) - // The number of batch txs depends on compression of the random data, hence the static test RNG seed. - // Example of different RNG seed that creates less than 2 frames: 1698700588902821588 - rng := rand.New(rand.NewSource(123)) - log := testlog.Logger(t, log.LevelError) - cfg := ChannelConfig{ - MaxFrameSize: 2200, - ChannelTimeout: 1000, - TargetNumFrames: 100, - } - cfg.InitNoneCompressor() - m := NewChannelManager(log, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.Clear(eth.BlockID{}) - - numTx := 3 // Adjust number of txs to make 2 frames - a := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - b := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) - bHeader := b.Header() - bHeader.Number = new(big.Int).Add(a.Number(), big.NewInt(1)) - bHeader.ParentHash = a.Hash() - b = b.WithSeal(bHeader) - - require.NoError(m.AddL2Block(a), "adding 1st L2 block") - require.NoError(m.AddL2Block(b), "adding 2nd L2 block") - - // Inside TxData, the two blocks queued above are written to the compressor. - // The NonCompressor will flush the first, but not the second block, when - // adding the second block, setting up the test with a partially flushed - // compressor. 
- txdata, err := m.TxData(eth.BlockID{}) - require.NoError(err, "Expected channel manager to produce valid tx data") - log.Info("generated first tx data", "len", txdata.Len()) - - m.TxConfirmed(txdata.ID(), eth.BlockID{}) - - // ensure no new ready data before closing - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected unclosed channel manager to only return a single frame") - - require.ErrorIs(m.Close(), ErrPendingAfterClose, "Expected channel manager to error on close because of pending tx data") - require.NotNil(m.currentChannel) - require.ErrorIs(m.currentChannel.FullErr(), ErrTerminated, "Expected current channel to be terminated by Close") - - txdata, err = m.TxData(eth.BlockID{}) - require.NoError(err, "Expected channel manager to produce tx data from remaining L2 block data") - log.Info("generated more tx data", "len", txdata.Len()) - - m.TxConfirmed(txdata.ID(), eth.BlockID{}) - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data") -} - -// ChannelManagerCloseAllTxsFailed ensures that the channel manager -// can gracefully close after producing transaction frames if none of these -// have successfully landed on chain. 
-func ChannelManagerCloseAllTxsFailed(t *testing.T, batchType uint) { - require := require.New(t) - rng := rand.New(rand.NewSource(1357)) - log := testlog.Logger(t, log.LevelCrit) - cfg := channelManagerTestConfig(100, batchType) - cfg.TargetNumFrames = 1000 - cfg.InitNoneCompressor() - m := NewChannelManager(log, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.Clear(eth.BlockID{}) - - a := derivetest.RandomL2BlockWithChainId(rng, 1000, defaultTestRollupConfig.L2ChainID) - - err := m.AddL2Block(a) - require.NoError(err, "Failed to add L2 block") - - drainTxData := func() (txdatas []txData) { - for { - txdata, err := m.TxData(eth.BlockID{}) - if err == io.EOF { - return - } - require.NoError(err, "Expected channel manager to produce valid tx data") - txdatas = append(txdatas, txdata) - } - } - - txdatas := drainTxData() - require.NotEmpty(txdatas) - - for _, txdata := range txdatas { - m.TxFailed(txdata.ID()) - } - - // Show that this data will continue to be emitted as long as the transaction - // fails and the channel manager is not closed - txdatas1 := drainTxData() - require.NotEmpty(txdatas) - require.ElementsMatch(txdatas, txdatas1, "expected same txdatas on re-attempt") - - for _, txdata := range txdatas1 { - m.TxFailed(txdata.ID()) - } - - require.NoError(m.Close(), "Expected to close channel manager gracefully") - - _, err = m.TxData(eth.BlockID{}) - require.ErrorIs(err, io.EOF, "Expected closed channel manager to produce no more tx data") -} - func TestChannelManager_ChannelCreation(t *testing.T) { l := testlog.Logger(t, log.LevelCrit) const maxChannelDuration = 15 @@ -475,7 +256,7 @@ func TestChannelManager_ChannelCreation(t *testing.T) { t.Run(test.name, func(t *testing.T) { m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.l1OriginLastClosedChannel = test.safeL1Block + m.l1OriginLastSubmittedChannel = test.safeL1Block require.Nil(t, m.currentChannel) require.NoError(t, m.ensureChannelWithSpace(eth.BlockID{})) @@ 
-543,10 +324,12 @@ func TestChannelManager_TxData(t *testing.T) { // * One when the channelManager was created // * One when the channel is about to be submitted - // * Potentially one more if the replacement channel is about to be submitted, - // this only happens when going from calldata->blobs because - // the channel is no longer ready to send until more data - // is added. + // * Potentially one more when the replacement channel + // is not immediately ready to be submitted, but later + // becomes ready after more data is added. + // This only happens when going from calldata->blobs because + // the channel is not immediately ready to send until more data + // is added due to blob channels having greater capacity. numExpectedAssessments int } @@ -591,7 +374,7 @@ func TestChannelManager_TxData(t *testing.T) { // we get some data to submit var data txData for { - m.blocks = []*types.Block{blockA} + m.blocks = append(m.blocks, blockA) data, err = m.TxData(eth.BlockID{}) if err == nil && data.Len() > 0 { break @@ -609,16 +392,15 @@ func TestChannelManager_TxData(t *testing.T) { } -// TestChannelManager_Requeue seeds the channel manager with blocks, +// TestChannelManager_handleChannelInvalidated seeds the channel manager with blocks, // takes a state snapshot, triggers the blocks->channels pipeline, -// and then calls Requeue. Finally, it asserts the channel manager's -// state is equal to the snapshot. It repeats this for a channel -// which has a pending transaction and verifies that Requeue is then -// a noop. -func TestChannelManager_Requeue(t *testing.T) { +// and then calls handleChannelInvalidated. It asserts on the final state of +// the channel manager. 
+func TestChannelManager_handleChannelInvalidated(t *testing.T) { l := testlog.Logger(t, log.LevelCrit) cfg := channelManagerTestConfig(100, derive.SingularBatchType) - m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + metrics := new(metrics.TestMetrics) + m := NewChannelManager(l, metrics, cfg, defaultTestRollupConfig) // Seed channel manager with blocks rng := rand.New(rand.NewSource(99)) @@ -631,42 +413,197 @@ func TestChannelManager_Requeue(t *testing.T) { m.blocks = stateSnapshot require.Empty(t, m.channelQueue) + // Place an old channel in the queue. + // This channel should not be affected by + // a requeue or a later channel timing out. + oldChannel := newChannel(l, nil, m.defaultCfg, defaultTestRollupConfig, 0, nil) + oldChannel.Close() + m.channelQueue = []*channel{oldChannel} + require.Len(t, m.channelQueue, 1) + + // Setup initial metrics + metrics.RecordL2BlockInPendingQueue(blockA) + metrics.RecordL2BlockInPendingQueue(blockB) + pendingBytesBefore := metrics.PendingBlocksBytesCurrent + // Trigger the blocks -> channelQueue data pipelining require.NoError(t, m.ensureChannelWithSpace(eth.BlockID{})) - require.NotEmpty(t, m.channelQueue) + require.Len(t, m.channelQueue, 2) require.NoError(t, m.processBlocks()) // Assert that at least one block was processed into the channel - require.NotContains(t, m.blocks, blockA) + require.Equal(t, 1, m.blockCursor) + + // Check metric decreased + metricsDelta := metrics.PendingBlocksBytesCurrent - pendingBytesBefore + require.Negative(t, metricsDelta) + + l1OriginBefore := m.l1OriginLastSubmittedChannel - // Call the function we are testing - m.Requeue(m.defaultCfg) + m.handleChannelInvalidated(m.currentChannel) // Ensure we got back to the state above require.Equal(t, m.blocks, stateSnapshot) - require.Empty(t, m.channelQueue) + require.Contains(t, m.channelQueue, oldChannel) + require.Len(t, m.channelQueue, 1) + + // Check metric came back up to previous value + require.Equal(t, 
pendingBytesBefore, metrics.PendingBlocksBytesCurrent) + + // Ensure the l1OridingLastSubmittedChannel was + // not changed. This ensures the next channel + // has its duration timeout deadline computed + // properly. + require.Equal(t, l1OriginBefore, m.l1OriginLastSubmittedChannel) // Trigger the blocks -> channelQueue data pipelining again require.NoError(t, m.ensureChannelWithSpace(eth.BlockID{})) require.NotEmpty(t, m.channelQueue) require.NoError(t, m.processBlocks()) +} - // Assert that at least one block was processed into the channel - require.NotContains(t, m.blocks, blockA) +func TestChannelManager_PruneBlocks(t *testing.T) { + l := testlog.Logger(t, log.LevelDebug) + cfg := channelManagerTestConfig(100, derive.SingularBatchType) + m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - // Now mark the 0th channel in the queue as already - // starting to send on chain - channel0 := m.channelQueue[0] - channel0.pendingTransactions["foo"] = txData{} - require.False(t, channel0.NoneSubmitted()) + a := types.NewBlock(&types.Header{ + Number: big.NewInt(0), + }, nil, nil, nil) + b := types.NewBlock(&types.Header{ // This will shortly become the safe head + Number: big.NewInt(1), + ParentHash: a.Hash(), + }, nil, nil, nil) + c := types.NewBlock(&types.Header{ + Number: big.NewInt(2), + ParentHash: b.Hash(), + }, nil, nil, nil) + + require.NoError(t, m.AddL2Block(a)) + m.blockCursor += 1 + require.NoError(t, m.AddL2Block(b)) + m.blockCursor += 1 + require.NoError(t, m.AddL2Block(c)) + m.blockCursor += 1 + + // Normal path + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: b.Hash(), + Number: b.NumberU64(), + }) + require.Equal(t, queue.Queue[*types.Block]{c}, m.blocks) + + // Safe chain didn't move, nothing to prune + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: b.Hash(), + Number: b.NumberU64(), + }) + require.Equal(t, queue.Queue[*types.Block]{c}, m.blocks) - // Call the function we are testing - m.Requeue(m.defaultCfg) + // Safe chain moved 
beyond the blocks we had + // state should be cleared + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: c.Hash(), + Number: uint64(99), + }) + require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) + + // No blocks to prune, NOOP + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: c.Hash(), + Number: c.NumberU64(), + }) + require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) - // The requeue shouldn't affect the pending channel - require.Contains(t, m.channelQueue, channel0) + // Put another block in + d := types.NewBlock(&types.Header{ + Number: big.NewInt(3), + ParentHash: c.Hash(), + }, nil, nil, nil) + require.NoError(t, m.AddL2Block(d)) + m.blockCursor += 1 + + // Safe chain reorg + // state should be cleared + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: a.Hash(), + Number: uint64(3), + }) + require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) + + // Put another block in + require.NoError(t, m.AddL2Block(d)) + m.blockCursor += 1 + + // Safe chain reversed + // state should be cleared + m.pruneSafeBlocks(eth.L2BlockRef{ + Hash: a.Hash(), // unused + Number: uint64(1), + }) + require.Equal(t, queue.Queue[*types.Block]{}, m.blocks) + +} + +func TestChannelManager_PruneChannels(t *testing.T) { + l := testlog.Logger(t, log.LevelCrit) + cfg := channelManagerTestConfig(100, derive.SingularBatchType) + cfg.InitNoneCompressor() + m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + + A, err := newChannelWithChannelOut(l, metrics.NoopMetrics, cfg, m.rollupCfg, 0) + require.NoError(t, err) + B, err := newChannelWithChannelOut(l, metrics.NoopMetrics, cfg, m.rollupCfg, 0) + require.NoError(t, err) + C, err := newChannelWithChannelOut(l, metrics.NoopMetrics, cfg, m.rollupCfg, 0) + require.NoError(t, err) + + m.channelQueue = []*channel{A, B, C} + + numTx := 1 + rng := rand.New(rand.NewSource(123)) + a0 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) + a0 = a0.WithSeal(&types.Header{Number: big.NewInt(0)}) + a1 := 
derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) + a1 = a1.WithSeal(&types.Header{Number: big.NewInt(1)}) + b2 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) + b2 = b2.WithSeal(&types.Header{Number: big.NewInt(2)}) + b3 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) + b3 = b3.WithSeal(&types.Header{Number: big.NewInt(3)}) + c4 := derivetest.RandomL2BlockWithChainId(rng, numTx, defaultTestRollupConfig.L2ChainID) + c4 = c4.WithSeal(&types.Header{Number: big.NewInt(4)}) + + _, err = A.AddBlock(a0) + require.NoError(t, err) + _, err = A.AddBlock(a1) + require.NoError(t, err) + + _, err = B.AddBlock(b2) + require.NoError(t, err) + _, err = B.AddBlock(b3) + require.NoError(t, err) + + _, err = C.AddBlock(c4) + require.NoError(t, err) + + m.pruneChannels(eth.L2BlockRef{ + Number: uint64(3), + }) + + require.Equal(t, []*channel{C}, m.channelQueue) + + m.pruneChannels(eth.L2BlockRef{ + Number: uint64(4), + }) + + require.Equal(t, []*channel{}, m.channelQueue) + + m.pruneChannels(eth.L2BlockRef{ + Number: uint64(4), + }) + + require.Equal(t, []*channel{}, m.channelQueue) - require.NotContains(t, m.blocks, blockA) } func TestChannelManager_ChannelOutFactory(t *testing.T) { type ChannelOutWrapper struct { @@ -690,3 +627,57 @@ func TestChannelManager_ChannelOutFactory(t *testing.T) { require.IsType(t, &ChannelOutWrapper{}, m.currentChannel.channelBuilder.co) } + +func TestChannelManager_CheckExpectedProgress(t *testing.T) { + l := testlog.Logger(t, log.LevelCrit) + cfg := channelManagerTestConfig(100, derive.SingularBatchType) + cfg.InitNoneCompressor() + m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + + channelMaxInclusionBlockNumber := uint64(3) + channelLatestSafeBlockNumber := uint64(11) + + // Prepare a (dummy) fully submitted channel + // with + // maxInclusionBlock and latest safe block number as above + A, err := 
newChannelWithChannelOut(l, metrics.NoopMetrics, cfg, m.rollupCfg, 0) + require.NoError(t, err) + rng := rand.New(rand.NewSource(123)) + a0 := derivetest.RandomL2BlockWithChainId(rng, 1, defaultTestRollupConfig.L2ChainID) + a0 = a0.WithSeal(&types.Header{Number: big.NewInt(int64(channelLatestSafeBlockNumber))}) + _, err = A.AddBlock(a0) + require.NoError(t, err) + A.maxInclusionBlock = channelMaxInclusionBlockNumber + A.Close() + A.channelBuilder.frames = nil + A.channelBuilder.frameCursor = 0 + require.True(t, A.isFullySubmitted()) + + m.channelQueue = append(m.channelQueue, A) + + // The current L1 number implies that + // channel A above should have been derived + // from, so we expect safe head to progress to + // the channelLatestSafeBlockNumber. + // Since the safe head moved to 11, there is no error: + ss := eth.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: channelMaxInclusionBlockNumber + 1}, + SafeL2: eth.L2BlockRef{Number: channelLatestSafeBlockNumber}, + } + err = m.CheckExpectedProgress(ss) + require.NoError(t, err) + + // If the currentL1 is as above but the + // safe head is less than channelLatestSafeBlockNumber, + // the method should return an error: + ss.SafeL2 = eth.L2BlockRef{Number: channelLatestSafeBlockNumber - 1} + err = m.CheckExpectedProgress(ss) + require.Error(t, err) + + // If the safe head is still less than channelLatestSafeBlockNumber + // but the currentL1 is _equal_ to the channelMaxInclusionBlockNumber + // there should be no error as that block is still being derived from: + ss.CurrentL1 = eth.L1BlockRef{Number: channelMaxInclusionBlockNumber} + err = m.CheckExpectedProgress(ss) + require.NoError(t, err) +} diff --git a/op-batcher/batcher/channel_test.go b/op-batcher/batcher/channel_test.go index 0aad780131c..b36ce9311bc 100644 --- a/op-batcher/batcher/channel_test.go +++ b/op-batcher/batcher/channel_test.go @@ -113,7 +113,7 @@ func TestChannelManager_NextTxData(t *testing.T) { frameNumber: uint16(0), }, } - 
channel.channelBuilder.PushFrames(frame) + channel.channelBuilder.frames = append(channel.channelBuilder.frames, frame) require.Equal(t, 1, channel.PendingFrames()) // Now the nextTxData function should return the frame @@ -142,7 +142,7 @@ func TestChannel_NextTxData_singleFrameTx(t *testing.T) { mockframes := makeMockFrameDatas(chID, n+1) // put multiple frames into channel, but less than target - ch.channelBuilder.PushFrames(mockframes[:n-1]...) + ch.channelBuilder.frames = mockframes[:n-1] requireTxData := func(i int) { require.True(ch.HasTxData(), "expected tx data %d", i) @@ -160,7 +160,7 @@ func TestChannel_NextTxData_singleFrameTx(t *testing.T) { require.False(ch.HasTxData()) // put in last two - ch.channelBuilder.PushFrames(mockframes[n-1 : n+1]...) + ch.channelBuilder.frames = append(ch.channelBuilder.frames, mockframes[n-1:n+1]...) for i := n - 1; i < n+1; i++ { requireTxData(i) } @@ -183,11 +183,11 @@ func TestChannel_NextTxData_multiFrameTx(t *testing.T) { mockframes := makeMockFrameDatas(chID, n+1) // put multiple frames into channel, but less than target - ch.channelBuilder.PushFrames(mockframes[:n-1]...) + ch.channelBuilder.frames = append(ch.channelBuilder.frames, mockframes[:n-1]...) require.False(ch.HasTxData()) // put in last two - ch.channelBuilder.PushFrames(mockframes[n-1 : n+1]...) + ch.channelBuilder.frames = append(ch.channelBuilder.frames, mockframes[n-1:n+1]...) 
require.True(ch.HasTxData()) txdata := ch.NextTxData() require.Len(txdata.frames, n) @@ -240,7 +240,8 @@ func TestChannelTxConfirmed(t *testing.T) { frameNumber: uint16(0), }, } - m.currentChannel.channelBuilder.PushFrames(frame) + m.currentChannel.channelBuilder.frames = append(m.currentChannel.channelBuilder.frames, frame) + require.Equal(t, 1, m.currentChannel.PendingFrames()) returnedTxData, err := m.nextTxData(m.currentChannel) expectedTxData := singleFrameTxData(frame) @@ -291,7 +292,7 @@ func TestChannelTxFailed(t *testing.T) { frameNumber: uint16(0), }, } - m.currentChannel.channelBuilder.PushFrames(frame) + m.currentChannel.channelBuilder.frames = append(m.currentChannel.channelBuilder.frames, frame) require.Equal(t, 1, m.currentChannel.PendingFrames()) returnedTxData, err := m.nextTxData(m.currentChannel) expectedTxData := singleFrameTxData(frame) diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index 6e237b176d8..dde08ac8419 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "math" "math/big" _ "net/http/pprof" "sync" @@ -112,7 +111,7 @@ type BatchSubmitter struct { running bool txpoolMutex sync.Mutex // guards txpoolState and txpoolBlockedBlob - txpoolState int + txpoolState TxPoolState txpoolBlockedBlob bool // lastStoredBlock is the last block loaded into `state`. If it is empty it should be set to the l2 safe head. 
@@ -161,8 +160,20 @@ func (l *BatchSubmitter) StartBatchSubmitting() error { } } - l.wg.Add(1) - go l.loop() + receiptsCh := make(chan txmgr.TxReceipt[txRef]) + receiptsLoopCtx, cancelReceiptsLoopCtx := context.WithCancel(context.Background()) + throttlingLoopCtx, cancelThrottlingLoopCtx := context.WithCancel(context.Background()) + + // DA throttling loop should always be started except for testing (indicated by ThrottleInterval == 0) + if l.Config.ThrottleInterval > 0 { + l.wg.Add(1) + go l.throttlingLoop(throttlingLoopCtx) + } else { + l.Log.Warn("Throttling loop is DISABLED due to 0 throttle-interval. This should not be disabled in prod.") + } + l.wg.Add(2) + go l.processReceiptsLoop(receiptsLoopCtx, receiptsCh) // receives from receiptsCh + go l.mainLoop(l.shutdownCtx, receiptsCh, cancelReceiptsLoopCtx, cancelThrottlingLoopCtx) // sends on receiptsCh l.Log.Info("Batch Submitter started") return nil @@ -241,11 +252,12 @@ func (l *BatchSubmitter) StopBatchSubmitting(ctx context.Context) error { // 2. Check if the sync status is valid or if we are all the way up to date // 3. Check if it needs to initialize state OR it is lagging (todo: lagging just means race condition?) // 4. Load all new blocks into the local state. +// 5. Dequeue blocks from local state which are now safe. // // If there is a reorg, it will reset the last stored block but not clear the internal state so // the state can be flushed to L1. 
-func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context) error { - start, end, err := l.calculateL2BlockRangeToStore(ctx) +func (l *BatchSubmitter) loadBlocksIntoState(syncStatus eth.SyncStatus, ctx context.Context) error { + start, end, err := l.calculateL2BlockRangeToStore(syncStatus) if err != nil { l.Log.Warn("Error calculating L2 block range", "err", err) return err @@ -308,12 +320,10 @@ func (l *BatchSubmitter) loadBlockIntoState(ctx context.Context, blockNumber uin return block, nil } -// calculateL2BlockRangeToStore determines the range (start,end] that should be loaded into the local state. -// It also takes care of initializing some local state (i.e. will modify l.lastStoredBlock in certain conditions) -func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth.BlockID, eth.BlockID, error) { +func (l *BatchSubmitter) getSyncStatus(ctx context.Context) (*eth.SyncStatus, error) { rollupClient, err := l.EndpointProvider.RollupClient(ctx) if err != nil { - return eth.BlockID{}, eth.BlockID{}, fmt.Errorf("getting rollup client: %w", err) + return nil, fmt.Errorf("getting rollup client: %w", err) } var ( @@ -331,7 +341,7 @@ func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth. // Ensure that we have the sync status if err != nil { - return eth.BlockID{}, eth.BlockID{}, fmt.Errorf("failed to get sync status: %w", err) + return nil, fmt.Errorf("failed to get sync status: %w", err) } // If we have a head, break out of the loop @@ -348,10 +358,21 @@ func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth. // Reset timer to tick of the new backoff time again timer.Reset(backoff) case <-ctx.Done(): - return eth.BlockID{}, eth.BlockID{}, ctx.Err() + return nil, ctx.Err() } } + return syncStatus, nil +} + +// calculateL2BlockRangeToStore determines the range (start,end] that should be loaded into the local state. +// It also takes care of initializing some local state (i.e. 
will modify l.lastStoredBlock in certain conditions +// as well as garbage collecting blocks which became safe) +func (l *BatchSubmitter) calculateL2BlockRangeToStore(syncStatus eth.SyncStatus) (eth.BlockID, eth.BlockID, error) { + if syncStatus.HeadL1 == (eth.L1BlockRef{}) { + return eth.BlockID{}, eth.BlockID{}, errors.New("empty sync status") + } + // Check last stored to see if it needs to be set on startup OR set if is lagged behind. // It lagging implies that the op-node processed some batches that were submitted prior to the current instance of the batcher being alive. if l.lastStoredBlock == (eth.BlockID{}) { @@ -381,6 +402,8 @@ func (l *BatchSubmitter) calculateL2BlockRangeToStore(ctx context.Context) (eth. // Submitted batch, but it is not valid // Missed L2 block somehow. +type TxPoolState int + const ( // Txpool states. Possible state transitions: // TxpoolGood -> TxpoolBlocked: @@ -390,13 +413,29 @@ const ( // send a cancellation transaction. // TxpoolCancelPending -> TxpoolGood: // happens once the cancel transaction completes, whether successfully or in error. - TxpoolGood int = iota + TxpoolGood TxPoolState = iota TxpoolBlocked TxpoolCancelPending ) -func (l *BatchSubmitter) loop() { +// setTxPoolState locks the mutex, sets the parameters to the supplied ones, and release the mutex. +func (l *BatchSubmitter) setTxPoolState(txPoolState TxPoolState, txPoolBlockedBlob bool) { + l.txpoolMutex.Lock() + l.txpoolState = txPoolState + l.txpoolBlockedBlob = txPoolBlockedBlob + l.txpoolMutex.Unlock() +} + +// mainLoop periodically: +// - polls the sequencer, +// - prunes the channel manager state (i.e. 
safe blocks) +// - loads unsafe blocks from the sequencer +// - drives the creation of channels and frames +// - sends transactions to the DA layer +func (l *BatchSubmitter) mainLoop(ctx context.Context, receiptsCh chan txmgr.TxReceipt[txRef], receiptsLoopCancel, throttlingLoopCancel context.CancelFunc) { defer l.wg.Done() + defer receiptsLoopCancel() + defer throttlingLoopCancel() queue := txmgr.NewQueue[txRef](l.killCtx, l.Txmgr, l.Config.MaxPendingTransactions) daGroup := &errgroup.Group{} @@ -410,111 +449,70 @@ func (l *BatchSubmitter) loop() { l.txpoolState = TxpoolGood l.txpoolMutex.Unlock() - // start the receipt/result processing loop - receiptsLoopDone := make(chan struct{}) - defer close(receiptsLoopDone) // shut down receipt loop l.l2BlockAdded = make(chan struct{}) defer close(l.l2BlockAdded) - receiptsCh := make(chan txmgr.TxReceipt[txRef]) - go l.processReceiptsLoop(receiptsCh, receiptsLoopDone) - - // DA throttling loop should always be started except for testing (indicated by ThrottleInterval == 0) - if l.Config.ThrottleInterval > 0 { - throttlingLoopDone := make(chan struct{}) - defer close(throttlingLoopDone) - go l.throttlingLoop(throttlingLoopDone) - } else { - l.Log.Warn("Throttling loop is DISABLED due to 0 throttle-interval. 
This should not be disabled in prod.") - } ticker := time.NewTicker(l.Config.PollInterval) defer ticker.Stop() - publishAndWait := func() { - l.publishStateToL1(queue, receiptsCh, daGroup, time.Duration(math.MaxInt64)) - if !l.Txmgr.IsClosed() { - if l.Config.UseAltDA { - l.Log.Info("Waiting for altDA writes to complete...") - err := daGroup.Wait() - if err != nil { - l.Log.Error("Error returned by one of the altda goroutines waited on", "err", err) - } - } - l.Log.Info("Waiting for L1 txs to be confirmed...") - err := queue.Wait() - if err != nil { - l.Log.Error("Error returned by one of the txmgr goroutines waited on", "err", err) - } - } else { - l.Log.Info("Txmgr is closed, remaining channel data won't be sent") - } - } - for { select { case <-ticker.C: + if !l.checkTxpool(queue, receiptsCh) { continue } - if err := l.loadBlocksIntoState(l.shutdownCtx); errors.Is(err, ErrReorg) { - err := l.state.Close() - if err != nil { - if errors.Is(err, ErrPendingAfterClose) { - l.Log.Warn("Closed channel manager to handle L2 reorg with pending channel(s) remaining - submitting") - } else { - l.Log.Error("Error closing the channel manager to handle a L2 reorg", "err", err) - } - } - // on reorg we want to publish all pending state then wait until each result clears before resetting - // the state. - publishAndWait() - l.clearState(l.shutdownCtx) + + syncStatus, err := l.getSyncStatus(l.shutdownCtx) + if err != nil { + l.Log.Warn("could not get sync status", "err", err) continue } - l.publishStateToL1(queue, receiptsCh, daGroup, l.Config.PollInterval) - case <-l.shutdownCtx.Done(): - if l.Txmgr.IsClosed() { - l.Log.Info("Txmgr is closed, remaining channel data won't be sent") - return - } - // This removes any never-submitted pending channels, so these do not have to be drained with transactions. - // Any remaining unfinished channel is terminated, so its data gets submitted. 
- err := l.state.Close() + + l.state.pruneSafeBlocks(syncStatus.SafeL2) + l.state.pruneChannels(syncStatus.SafeL2) + + err = l.state.CheckExpectedProgress(*syncStatus) if err != nil { - if errors.Is(err, ErrPendingAfterClose) { - l.Log.Warn("Closed channel manager on shutdown with pending channel(s) remaining - submitting") - } else { - l.Log.Error("Error closing the channel manager on shutdown", "err", err) - } + l.Log.Warn("error checking expected progress, clearing state and waiting for node sync", "err", err) + l.waitNodeSyncAndClearState() + continue + } + + if err := l.loadBlocksIntoState(*syncStatus, l.shutdownCtx); errors.Is(err, ErrReorg) { + l.Log.Warn("error loading blocks, clearing state and waiting for node sync", "err", err) + l.waitNodeSyncAndClearState() + continue } - publishAndWait() - l.Log.Info("Finished publishing all remaining channel data") + + l.publishStateToL1(queue, receiptsCh, daGroup, l.Config.PollInterval) + case <-ctx.Done(): + l.Log.Warn("main loop returning") return } } } -func (l *BatchSubmitter) processReceiptsLoop(receiptsCh chan txmgr.TxReceipt[txRef], receiptsLoopDone chan struct{}) { +// processReceiptsLoop handles transaction receipts from the DA layer +func (l *BatchSubmitter) processReceiptsLoop(ctx context.Context, receiptsCh chan txmgr.TxReceipt[txRef]) { + defer l.wg.Done() l.Log.Info("Starting receipts processing loop") for { select { case r := <-receiptsCh: - l.txpoolMutex.Lock() if errors.Is(r.Err, txpool.ErrAlreadyReserved) && l.txpoolState == TxpoolGood { - l.txpoolState = TxpoolBlocked - l.txpoolBlockedBlob = r.ID.isBlob - l.Log.Info("incompatible tx in txpool", "is_blob", r.ID.isBlob) + l.setTxPoolState(TxpoolBlocked, r.ID.isBlob) + l.Log.Warn("incompatible tx in txpool", "id", r.ID, "is_blob", r.ID.isBlob) } else if r.ID.isCancel && l.txpoolState == TxpoolCancelPending { // Set state to TxpoolGood even if the cancellation transaction ended in error // since the stuck transaction could have cleared while we were 
waiting. - l.txpoolState = TxpoolGood + l.setTxPoolState(TxpoolGood, l.txpoolBlockedBlob) l.Log.Info("txpool may no longer be blocked", "err", r.Err) } - l.txpoolMutex.Unlock() l.Log.Info("Handling receipt", "id", r.ID) l.handleReceipt(r) - case <-receiptsLoopDone: - l.Log.Info("Receipts processing loop done") + case <-ctx.Done(): + l.Log.Info("Receipt processing loop done") return } } @@ -524,7 +522,8 @@ func (l *BatchSubmitter) processReceiptsLoop(receiptsCh chan txmgr.TxReceipt[txR // throttling of incoming data prevent the backlog from growing too large. By looping & calling the miner API setter // continuously, we ensure the engine currently in use is always going to be reset to the proper throttling settings // even in the event of sequencer failover. -func (l *BatchSubmitter) throttlingLoop(throttlingLoopDone chan struct{}) { +func (l *BatchSubmitter) throttlingLoop(ctx context.Context) { + defer l.wg.Done() l.Log.Info("Starting DA throttling loop") ticker := time.NewTicker(l.Config.ThrottleInterval) defer ticker.Stop() @@ -576,13 +575,24 @@ func (l *BatchSubmitter) throttlingLoop(throttlingLoopDone chan struct{}) { updateParams() case <-ticker.C: updateParams() - case <-throttlingLoopDone: + case <-ctx.Done(): l.Log.Info("DA throttling loop done") return } } } +func (l *BatchSubmitter) waitNodeSyncAndClearState() { + // Wait for any in flight transactions + // to be ingested by the node before + // we start loading blocks again. 
+ err := l.waitNodeSync() + if err != nil { + l.Log.Warn("error waiting for node sync", "err", err) + } + l.clearState(l.shutdownCtx) +} + // waitNodeSync Check to see if there was a batcher tx sent recently that // still needs more block confirmations before being considered finalized func (l *BatchSubmitter) waitNodeSync() error { diff --git a/op-batcher/cmd/main.go b/op-batcher/cmd/main.go index 82472006da2..39ca58b193c 100644 --- a/op-batcher/cmd/main.go +++ b/op-batcher/cmd/main.go @@ -18,7 +18,7 @@ import ( ) var ( - Version = "v0.10.14" + Version = "v0.0.0" GitCommit = "" GitDate = "" ) diff --git a/op-batcher/justfile b/op-batcher/justfile new file mode 100644 index 00000000000..2647debbcb4 --- /dev/null +++ b/op-batcher/justfile @@ -0,0 +1,35 @@ +import '../just/go.just' + +# Build ldflags string +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "") + "'" + +BINARY := "./bin/op-batcher" + +# Build op-batcher binary +op-batcher: (go_build BINARY "./cmd" "-ldflags" _LDFLAGSSTRING) + +# Clean build artifacts +clean: + rm -f {{BINARY}} + +# Run tests +test: (go_test "./...") + +[private] +batcher_fuzz_task FUZZ TIME='10s': (go_fuzz FUZZ TIME "./batcher") + +# Run fuzzing tests +fuzz: + #!{{MAP_JUST}} batcher_fuzz_task + FuzzChannelConfig_CheckTimeout + FuzzDurationZero + FuzzDurationTimeoutMaxChannelDuration + FuzzDurationTimeoutZeroMaxChannelDuration + FuzzChannelCloseTimeout + FuzzChannelZeroCloseTimeout + FuzzSeqWindowClose + FuzzSeqWindowZeroTimeoutClose diff --git a/op-batcher/metrics/test.go b/op-batcher/metrics/test.go new file mode 100644 index 00000000000..76c365ea7e2 --- /dev/null +++ b/op-batcher/metrics/test.go @@ -0,0 +1,22 @@ +package metrics + +import ( + "github.com/ethereum/go-ethereum/core/types" +) + +type TestMetrics struct { + noopMetrics + PendingBlocksBytesCurrent float64 +} + +var _ Metricer = new(TestMetrics) + +func (m 
*TestMetrics) RecordL2BlockInPendingQueue(block *types.Block) { + _, rawSize := estimateBatchSize(block) + m.PendingBlocksBytesCurrent += float64(rawSize) + +} +func (m *TestMetrics) RecordL2BlockInChannel(block *types.Block) { + _, rawSize := estimateBatchSize(block) + m.PendingBlocksBytesCurrent -= float64(rawSize) +} diff --git a/op-batcher/readme.md b/op-batcher/readme.md new file mode 100644 index 00000000000..ba547845f09 --- /dev/null +++ b/op-batcher/readme.md @@ -0,0 +1,88 @@ +# op-batcher + +The `op-batcher` is responsible for ensuring data availability. See the [specs](https://specs.optimism.io/protocol/batcher.html). + + +## Interactions & Dependencies +The `op-batcher` works together with the [sequencer](../op-node/) (which it reads unsafe blocks from), the data availability layer (e.g. Layer 1 or an [Alt DA](../op-alt-da/) layer, which it posts data to), and the [derivation pipeline](../op-node/) (which reads the data from the DA layer and progresses the safe chain). + +It depends directly on some code shared with the derivation pipeline, namely the [`ChannelOut`](../op-node/rollup/derive/channel_out.go) implementation(s). It also depends directly on the shared [txmgr](../op-service/txmgr/) module. + +## Testing +The batcher has a suite of unit test which can be triggered by running +``` +go test ./... +``` +from this directory. There are also end-to-end tests in [`op-e2e`](../op-e2e/) which integrate the batcher. + +## Architecture + +The architecture of this batcher implementation is shown on the left side of the following diagram: + +![architecture](./architecture.png) + +Batch submitting (writing to the DA layer, in the middle of the diagram) works together with the derivation pipeline (on the right side of the diagram, reading from the DA layer) to progress the safe chain. 
+ +The philosophy behind the current architecture is: +* Blocks, channels and frames are kept around for as long as they might be needed, and discarded as soon as they are not needed. They are not moved from one part of state to another. +* We retain block data in a strict order for as long as necessary. We only garbage collect frames, channels and blocks when the safe head moves sufficiently and those structures have done their job. +* When something goes wrong, we rewind the state cursors by the minimal amount we need to get going again. + + +### Happy path + +In the happy path, the batcher periodically: +1. Enqueues unsafe blocks and dequeues safe blocks from the sequencer to its internal state. +2. Enqueues a new channel, if necessary. +3. Processes some unprocessed blocks into the current channel, triggers the compression of the block data and the creation of frames. +4. Sends frames from the channel queue to the DA layer as (e.g. to Ethereum L1 as calldata or blob transactions). +5. If there is more transaction data to send, go to 2. Else wait for a tick and go to 1. + + +The `blockCursor` state variable tracks the next unprocessed block. +In each channel, the `frameCursor` tracks the next unsent frame. + + +### Reorgs +When an L2 unsafe reorg is detected, the batch submitter will reset its state, and wait for any in flight transactions to be ingested by the verifier nodes before starting work again. + +### Tx Failed +When a Tx fails, an asynchronous receipts handler is triggered. The channel from whence the Tx's frames came has its `frameCursor` rewound, so that all the frames can be resubmitted in order. + +### Channel Times Out +When a Tx is confirmed, an asynchronous receipts handler is triggered. We only update the batcher's state if the channel timed out on chain. In that case, the `blockCursor` is rewound to the first block added to that channel, and the channel queue is cleared out. 
This allows the batcher to start fresh building a new channel starting from the same block -- it does not need to refetch blocks from the sequencer. + +## Design Principles and Optimization Targets +At the current time, the batcher should be optimized for correctness, simplicity and robustness. It is considered preferable to prioritize these properties, even at the expense of other potentially desirable properties such as frugality. For example, it is preferable to have the batcher resubmit some data from time to time ("wasting" money on data availability costs) instead of avoiding that by e.g. adding some persistent state to the batcher. + +The batcher can almost always recover from unforeseen situations by being restarted. + + +Some complexity is permitted, however, for handling data availability switching, so that the batcher is not wasting money for longer periods of time. + +### Data Availability Backlog + +A chain can potentially experience an influx of large transactions whose data availability requirements exceed the total +throughput of the data availability layer. While this situation might resolve on its own in the long term through the +data availability pricing mechanism, in practice this feedback loop is too slow to prevent a very large backlog of data +from being produced, even at a relatively low cost to whomever is submitting the large transactions. In such +circumstances, the safe head can fall significantly behind the unsafe head, and the time between seeing a transaction +(and charging it a given L1 data fee) and actually posting the transaction to the data availability layer grows larger +and larger. Because DA costs can rise quickly during such an event, the batcher can end up paying far more to post the +transaction to the DA layer than what can be recovered from the transaction's data fee. 
+ +To prevent a significant DA backlog, the batcher can instruct the block builder (via op-geth's miner RPC API) to impose +thresholds on the total DA requirements of a single block, and/or the maximum DA requirement of any single +transaction. In the happy case, the batcher instructs the block builder to impose a block-level DA limit of +OP_BATCHER_THROTTLE_ALWAYS_BLOCK_SIZE, and imposes no additional limit on the DA requirements of a single +transaction. But in the case of a DA backlog (as defined by OP_BATCHER_THROTTLE_THRESHOLD), the batcher instructs the +block builder to instead impose a (tighter) block level limit of OP_BATCHER_THROTTLE_BLOCK_SIZE, and a single +transaction limit of OP_BATCHER_THROTTLE_TRANSACTION_SIZE. + +## Known issues and future work + +Link to [open issues with the `op-batcher` tag](https://github.com/ethereum-optimism/optimism/issues?q=is%3Aopen+is%3Aissue+label%3AA-op-batcher). + +The batcher launches L1 transactions in parallel so that it can achieve higher throughput, particularly in situations where there is a large backlog of data which needs to be posted. Sometimes, transactions can get stuck in the L1 mempool. The batcher does have functionality to clear these stuck transactions, but it is not completely reliable. + +The automatic data availability switching behavior is a somewhat new feature which may still have some bugs in it. diff --git a/op-chain-ops/contracts/common.go b/op-chain-ops/contracts/common.go deleted file mode 100644 index a1eb5b471db..00000000000 --- a/op-chain-ops/contracts/common.go +++ /dev/null @@ -1,21 +0,0 @@ -package contracts - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/urfave/cli/v2" -) - -// parseAddress will parse a [common.Address] from a [cli.Context] and return -// an error if the configured address is not correct. 
-func parseAddress(ctx *cli.Context, name string) (common.Address, error) { - value := ctx.String(name) - if value == "" { - return common.Address{}, nil - } - if !common.IsHexAddress(value) { - return common.Address{}, fmt.Errorf("invalid address: %s", value) - } - return common.HexToAddress(value), nil -} diff --git a/op-chain-ops/contracts/contracts.go b/op-chain-ops/contracts/contracts.go deleted file mode 100644 index def9c374620..00000000000 --- a/op-chain-ops/contracts/contracts.go +++ /dev/null @@ -1,50 +0,0 @@ -package contracts - -import ( - "github.com/ethereum/go-ethereum/common" - "github.com/urfave/cli/v2" -) - -// Addresses represents the address values of various contracts. The values can -// be easily populated via a [cli.Context]. -type Addresses struct { - AddressManager common.Address - OptimismPortal common.Address - L1StandardBridge common.Address - L1CrossDomainMessenger common.Address - CanonicalTransactionChain common.Address - StateCommitmentChain common.Address -} - -// NewAddresses populates an Addresses struct given a [cli.Context]. -// This is useful for writing scripts that interact with smart contracts. 
-func NewAddresses(ctx *cli.Context) (*Addresses, error) { - var addresses Addresses - var err error - - addresses.AddressManager, err = parseAddress(ctx, "address-manager-address") - if err != nil { - return nil, err - } - addresses.OptimismPortal, err = parseAddress(ctx, "optimism-portal-address") - if err != nil { - return nil, err - } - addresses.L1StandardBridge, err = parseAddress(ctx, "l1-standard-bridge-address") - if err != nil { - return nil, err - } - addresses.L1CrossDomainMessenger, err = parseAddress(ctx, "l1-crossdomain-messenger-address") - if err != nil { - return nil, err - } - addresses.CanonicalTransactionChain, err = parseAddress(ctx, "canonical-transaction-chain-address") - if err != nil { - return nil, err - } - addresses.StateCommitmentChain, err = parseAddress(ctx, "state-commitment-chain-address") - if err != nil { - return nil, err - } - return &addresses, nil -} diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index 7ed0a6e2f68..69d8999880b 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -854,12 +854,6 @@ func (d *L1DependenciesConfig) CheckAddresses(dependencyContext DependencyContex // The genesis generation may log warnings, do a best-effort support attempt, // or ignore these attributes completely. type LegacyDeployConfig struct { - // CliqueSignerAddress represents the signer address for the clique consensus engine. - // It is used in the multi-process devnet to sign blocks. - CliqueSignerAddress common.Address `json:"cliqueSignerAddress"` - // L1UseClique represents whether or not to use the clique consensus engine. - L1UseClique bool `json:"l1UseClique"` - // DeploymentWaitConfirmations is the number of confirmations to wait during // deployment. This is DEPRECATED and should be removed in a future PR. 
DeploymentWaitConfirmations int `json:"deploymentWaitConfirmations"` diff --git a/op-chain-ops/genesis/genesis.go b/op-chain-ops/genesis/genesis.go index c852fa0fc10..ee783685235 100644 --- a/op-chain-ops/genesis/genesis.go +++ b/op-chain-ops/genesis/genesis.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" ) @@ -141,25 +140,12 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) { LondonBlock: big.NewInt(0), ArrowGlacierBlock: big.NewInt(0), GrayGlacierBlock: big.NewInt(0), - ShanghaiTime: nil, - CancunTime: nil, - } - - extraData := make([]byte, 0) - if config.L1UseClique { - // warning: clique has an overly strict block header timestamp check against the system wallclock, - // causing blocks to get scheduled as "future block" and not get mined instantly when produced. - chainConfig.Clique = ¶ms.CliqueConfig{ - Period: config.L1BlockTime, - Epoch: 30000, - } - extraData = append(append(make([]byte, 32), config.CliqueSignerAddress[:]...), make([]byte, crypto.SignatureLength)...) 
- } else { - chainConfig.MergeNetsplitBlock = big.NewInt(0) - chainConfig.TerminalTotalDifficulty = big.NewInt(0) - chainConfig.TerminalTotalDifficultyPassed = true - chainConfig.ShanghaiTime = u64ptr(0) - chainConfig.CancunTime = u64ptr(0) + ShanghaiTime: u64ptr(0), + CancunTime: u64ptr(0), + // To enable post-Merge consensus at genesis + MergeNetsplitBlock: big.NewInt(0), + TerminalTotalDifficulty: big.NewInt(0), + TerminalTotalDifficultyPassed: true, } gasLimit := config.L1GenesisBlockGasLimit @@ -178,7 +164,7 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) { if timestamp == 0 { timestamp = hexutil.Uint64(time.Now().Unix()) } - if !config.L1UseClique && config.L1CancunTimeOffset != nil { + if config.L1CancunTimeOffset != nil { cancunTime := uint64(timestamp) + uint64(*config.L1CancunTimeOffset) chainConfig.CancunTime = &cancunTime } @@ -187,7 +173,7 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) { Config: &chainConfig, Nonce: uint64(config.L1GenesisBlockNonce), Timestamp: uint64(timestamp), - ExtraData: extraData, + ExtraData: make([]byte, 0), GasLimit: uint64(gasLimit), Difficulty: difficulty.ToInt(), Mixhash: config.L1GenesisBlockMixHash, diff --git a/op-chain-ops/genesis/testdata/test-deploy-config-full.json b/op-chain-ops/genesis/testdata/test-deploy-config-full.json index 814fff245b0..7fe9a78e715 100644 --- a/op-chain-ops/genesis/testdata/test-deploy-config-full.json +++ b/op-chain-ops/genesis/testdata/test-deploy-config-full.json @@ -6,8 +6,6 @@ "maxSequencerDrift": 20, "sequencerWindowSize": 100, "channelTimeout": 30, - "l1UseClique": false, - "cliqueSignerAddress": "0x0000000000000000000000000000000000000000", "customGasTokenAddress": "0x0000000000000000000000000000000000000000", "p2pSequencerAddress": "0x9965507d1a55bcc2695c58ba16fb37d819b0a4dc", "batchInboxAddress": "0x42000000000000000000000000000000000000ff", diff --git a/op-chain-ops/interopgen/configs.go b/op-chain-ops/interopgen/configs.go index 
942588a9929..948d9daa305 100644 --- a/op-chain-ops/interopgen/configs.go +++ b/op-chain-ops/interopgen/configs.go @@ -35,7 +35,7 @@ type SuperFaultProofConfig struct { } type OPCMImplementationsConfig struct { - Release string + L1ContractsRelease string FaultProof SuperFaultProofConfig diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index 476406821ea..e915b724e10 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -170,10 +170,9 @@ func DeploySuperchainToL1(l1Host *script.Host, superCfg *SuperchainConfig) (*Sup ProofMaturityDelaySeconds: superCfg.Implementations.FaultProof.ProofMaturityDelaySeconds, DisputeGameFinalityDelaySeconds: superCfg.Implementations.FaultProof.DisputeGameFinalityDelaySeconds, MipsVersion: superCfg.Implementations.FaultProof.MipsVersion, - Release: superCfg.Implementations.Release, + L1ContractsRelease: superCfg.Implementations.L1ContractsRelease, SuperchainConfigProxy: superDeployment.SuperchainConfigProxy, ProtocolVersionsProxy: superDeployment.ProtocolVersionsProxy, - OpcmProxyOwner: superDeployment.SuperchainProxyAdmin, UseInterop: superCfg.Implementations.UseInterop, StandardVersionsToml: standard.VersionsMainnetData, }) @@ -210,7 +209,7 @@ func DeployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme BasefeeScalar: cfg.GasPriceOracleBaseFeeScalar, BlobBaseFeeScalar: cfg.GasPriceOracleBlobBaseFeeScalar, L2ChainId: new(big.Int).SetUint64(cfg.L2ChainID), - OpcmProxy: superDeployment.OpcmProxy, + Opcm: superDeployment.Opcm, SaltMixer: cfg.SaltMixer, GasLimit: cfg.GasLimit, DisputeGameType: cfg.DisputeGameType, diff --git a/op-chain-ops/interopgen/deployments.go b/op-chain-ops/interopgen/deployments.go index ba18fbfdf9b..f98a0554d87 100644 --- a/op-chain-ops/interopgen/deployments.go +++ b/op-chain-ops/interopgen/deployments.go @@ -9,8 +9,7 @@ type L1Deployment struct { } type Implementations struct { - OpcmProxy common.Address `json:"OPCMProxy"` - 
OpcmImpl common.Address `json:"OPCMImpl"` + Opcm common.Address `json:"OPCM"` DelayedWETHImpl common.Address `json:"DelayedWETHImpl"` OptimismPortalImpl common.Address `json:"OptimismPortalImpl"` PreimageOracleSingleton common.Address `json:"PreimageOracleSingleton"` diff --git a/op-chain-ops/interopgen/recipe.go b/op-chain-ops/interopgen/recipe.go index 8983ee72da8..e70c69e9f48 100644 --- a/op-chain-ops/interopgen/recipe.go +++ b/op-chain-ops/interopgen/recipe.go @@ -69,7 +69,7 @@ func (r *InteropDevRecipe) Build(addrs devkeys.Addresses) (*WorldConfig, error) ProtocolVersionsOwner: superchainProtocolVersionsOwner, Deployer: superchainDeployer, Implementations: OPCMImplementationsConfig{ - Release: "dev", + L1ContractsRelease: "dev", FaultProof: SuperFaultProofConfig{ WithdrawalDelaySeconds: big.NewInt(604800), MinProposalSizeBytes: big.NewInt(10000), diff --git a/op-chain-ops/script/script.go b/op-chain-ops/script/script.go index 3ce493487f7..9a3d9ae8020 100644 --- a/op-chain-ops/script/script.go +++ b/op-chain-ops/script/script.go @@ -609,7 +609,7 @@ func (h *Host) onOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpCo }) } // Sanity check that top of the call-stack matches the scope context now - if len(h.callStack) == 0 || h.callStack[len(h.callStack)-1].Ctx != scopeCtx { + if h.callStack[len(h.callStack)-1].Ctx != scopeCtx { panic("scope context changed without call-frame pop/push") } cf := h.callStack[len(h.callStack)-1] diff --git a/op-challenger/README.md b/op-challenger/README.md index 97fafab8e6f..5efbbd85b3d 100644 --- a/op-challenger/README.md +++ b/op-challenger/README.md @@ -1,7 +1,7 @@ # op-challenger The `op-challenger` is a modular **op-stack** challenge agent written in -golang for dispute games including, but not limited to,attestation games, +golang for dispute games including, but not limited to, attestation games, fault games, and validity games. To learn more about dispute games, visit the [fault proof specs][proof-specs]. 
diff --git a/op-conductor/RUNBOOK.md b/op-conductor/RUNBOOK.md index 8e80fde6b6c..00b8757a338 100644 --- a/op-conductor/RUNBOOK.md +++ b/op-conductor/RUNBOOK.md @@ -28,7 +28,7 @@ OP_CONDUCTOR_NODE_RPC= # for example, http://op-node:8545 OP_CONDUCTOR_EXECUTION_RPC= # for example, http://op-geth:8545 OP_CONDUCTOR_NETWORK= # for example, base-mainnet, op-mainnet, etc, should be same as OP_NODE_NETWORK OP_CONDUCTOR_HEALTHCHECK_INTERVAL= # in seconds -OP_CONDUCTOR_HEALTHCHECK_UNSAFE_INTERVAL= # Interval allowed between unsafe head and now measured in seconds in seconds +OP_CONDUCTOR_HEALTHCHECK_UNSAFE_INTERVAL= # Interval allowed between unsafe head and now measured in seconds OP_CONDUCTOR_HEALTHCHECK_MIN_PEER_COUNT= # minimum number of peers required to be considered healthy OP_CONDUCTOR_RAFT_BOOTSTRAP=true/false # set to true if you want to bootstrap the raft cluster ``` diff --git a/op-deployer/Dockerfile.default b/op-deployer/Dockerfile.default index cc5ca8d95e4..0821bc0c48e 100644 --- a/op-deployer/Dockerfile.default +++ b/op-deployer/Dockerfile.default @@ -1,3 +1,9 @@ FROM debian:bookworm-20240812-slim ENTRYPOINT ["/op-deployer"] -COPY op-deployer /op-deployer \ No newline at end of file +COPY op-deployer /op-deployer + +# Install ca-certificates so that HTTPS requests work +RUN apt-get update && apt-get install -y ca-certificates + +# Symlink onto the PATH +RUN ln -s /op-deployer /usr/local/bin/op-deployer \ No newline at end of file diff --git a/op-deployer/pkg/deployer/artifacts/downloader.go b/op-deployer/pkg/deployer/artifacts/downloader.go index 1303adbe86a..7e566952e09 100644 --- a/op-deployer/pkg/deployer/artifacts/downloader.go +++ b/op-deployer/pkg/deployer/artifacts/downloader.go @@ -3,8 +3,10 @@ package artifacts import ( "archive/tar" "bufio" + "bytes" "compress/gzip" "context" + "crypto/sha256" "errors" "fmt" "io" @@ -15,6 +17,8 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" "github.com/ethereum/go-ethereum/log" @@ -41,15 +45,50 @@ func LogProgressor(lgr log.Logger) DownloadProgressor { func Download(ctx context.Context, loc *Locator, progress DownloadProgressor) (foundry.StatDirFs, CleanupFunc, error) { var u *url.URL var err error + var checker integrityChecker if loc.IsTag() { u, err = standard.ArtifactsURLForTag(loc.Tag) if err != nil { return nil, nil, fmt.Errorf("failed to get standard artifacts URL for tag %s: %w", loc.Tag, err) } + + hash, err := standard.ArtifactsHashForTag(loc.Tag) + if err != nil { + return nil, nil, fmt.Errorf("failed to get standard artifacts hash for tag %s: %w", loc.Tag, err) + } + + checker = &hashIntegrityChecker{hash: hash} } else { u = loc.URL + checker = &noopIntegrityChecker{} } + return downloadURL(ctx, u, progress, checker) +} + +type integrityChecker interface { + CheckIntegrity(data []byte) error +} + +type hashIntegrityChecker struct { + hash common.Hash +} + +func (h *hashIntegrityChecker) CheckIntegrity(data []byte) error { + hash := sha256.Sum256(data) + if hash != h.hash { + return fmt.Errorf("integrity check failed - expected: %x, got: %x", h.hash, hash) + } + return nil +} + +type noopIntegrityChecker struct{} + +func (noopIntegrityChecker) CheckIntegrity(data []byte) error { + return nil +} + +func downloadURL(ctx context.Context, u *url.URL, progress DownloadProgressor, checker integrityChecker) (foundry.StatDirFs, CleanupFunc, error) { switch u.Scheme { case "http", "https": req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil) @@ -78,7 +117,16 @@ func Download(ctx context.Context, loc *Locator, progress DownloadProgressor) (f total: resp.ContentLength, } - gr, err := gzip.NewReader(pr) + data, err := io.ReadAll(pr) + if err != nil { + return nil, nil, fmt.Errorf("failed to read response body: %w", err) + } + + if err := checker.CheckIntegrity(data); err != nil { + return nil, nil, 
fmt.Errorf("failed to check integrity: %w", err) + } + + gr, err := gzip.NewReader(bytes.NewReader(data)) if err != nil { return nil, nil, fmt.Errorf("failed to create gzip reader: %w", err) } @@ -111,7 +159,6 @@ type progressReader struct { } func (pr *progressReader) Read(p []byte) (int, error) { - n, err := pr.r.Read(p) pr.curr += int64(n) if pr.progress != nil && time.Since(pr.lastPrint) > 1*time.Second { diff --git a/op-deployer/pkg/deployer/artifacts/downloader_test.go b/op-deployer/pkg/deployer/artifacts/downloader_test.go index e66b41f96a8..cf4ef4742c9 100644 --- a/op-deployer/pkg/deployer/artifacts/downloader_test.go +++ b/op-deployer/pkg/deployer/artifacts/downloader_test.go @@ -9,10 +9,12 @@ import ( "os" "testing" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" ) -func TestDownloadArtifacts(t *testing.T) { +func TestDownloadArtifacts_MockArtifacts(t *testing.T) { f, err := os.OpenFile("testdata/artifacts.tar.gz", os.O_RDONLY, 0o644) require.NoError(t, err) defer f.Close() @@ -21,6 +23,9 @@ func TestDownloadArtifacts(t *testing.T) { w.WriteHeader(http.StatusOK) _, err := io.Copy(w, f) require.NoError(t, err) + // Seek to beginning of file for next request + _, err = f.Seek(0, 0) + require.NoError(t, err) })) defer ts.Close() @@ -31,14 +36,50 @@ func TestDownloadArtifacts(t *testing.T) { URL: artifactsURL, } - fs, cleanup, err := Download(ctx, loc, nil) - require.NoError(t, err) - require.NotNil(t, fs) - defer func() { - require.NoError(t, cleanup()) - }() + t.Run("success", func(t *testing.T) { + fs, cleanup, err := Download(ctx, loc, nil) + require.NoError(t, err) + require.NotNil(t, fs) + defer func() { + require.NoError(t, cleanup()) + }() - info, err := fs.Stat("WETH98.sol/WETH98.json") - require.NoError(t, err) - require.Greater(t, info.Size(), int64(0)) + info, err := fs.Stat("WETH98.sol/WETH98.json") + require.NoError(t, err) + require.Greater(t, info.Size(), int64(0)) + }) + + t.Run("bad integrity", func(t 
*testing.T) { + _, _, err := downloadURL(ctx, loc.URL, nil, &hashIntegrityChecker{ + hash: common.Hash{'B', 'A', 'D'}, + }) + require.Error(t, err) + require.ErrorContains(t, err, "integrity check failed") + }) + + t.Run("ok integrity", func(t *testing.T) { + _, _, err := downloadURL(ctx, loc.URL, nil, &hashIntegrityChecker{ + hash: common.HexToHash("0x0f814df0c4293aaaadd468ac37e6c92f0b40fd21df848076835cb2c21d2a516f"), + }) + require.NoError(t, err) + }) +} + +func TestDownloadArtifacts_TaggedVersions(t *testing.T) { + tags := []string{ + "op-contracts/v1.6.0", + "op-contracts/v1.7.0-beta.1+l2-contracts", + } + for _, tag := range tags { + t.Run(tag, func(t *testing.T) { + t.Parallel() + + loc := MustNewLocatorFromTag(tag) + _, cleanup, err := Download(context.Background(), loc, nil) + t.Cleanup(func() { + require.NoError(t, cleanup()) + }) + require.NoError(t, err) + }) + } } diff --git a/op-deployer/pkg/deployer/artifacts/locator.go b/op-deployer/pkg/deployer/artifacts/locator.go index 160e8790420..aa44d43644c 100644 --- a/op-deployer/pkg/deployer/artifacts/locator.go +++ b/op-deployer/pkg/deployer/artifacts/locator.go @@ -24,6 +24,22 @@ var DefaultL2ContractsLocator = &Locator{ Tag: standard.DefaultL2ContractsTag, } +func NewLocatorFromTag(tag string) (*Locator, error) { + loc := new(Locator) + if err := loc.UnmarshalText([]byte("tag://" + tag)); err != nil { + return nil, fmt.Errorf("failed to unmarshal tag: %w", err) + } + return loc, nil +} + +func MustNewLocatorFromTag(tag string) *Locator { + loc, err := NewLocatorFromTag(tag) + if err != nil { + panic(err) + } + return loc +} + type Locator struct { URL *url.URL Tag string diff --git a/op-deployer/pkg/deployer/bootstrap/opcm.go b/op-deployer/pkg/deployer/bootstrap/opcm.go index 2f5976f304a..89a8c3df512 100644 --- a/op-deployer/pkg/deployer/bootstrap/opcm.go +++ b/op-deployer/pkg/deployer/bootstrap/opcm.go @@ -164,10 +164,6 @@ func OPCM(ctx context.Context, cfg OPCMConfig) error { if err != nil { return 
fmt.Errorf("error getting standard versions TOML: %w", err) } - opcmProxyOwnerAddr, err := standard.ManagerOwnerAddrFor(chainIDU64) - if err != nil { - return fmt.Errorf("error getting superchain proxy admin: %w", err) - } signer := opcrypto.SignerFnFromBind(opcrypto.PrivateKeySignerFn(cfg.privateKeyECDSA, chainID)) chainDeployer := crypto.PubkeyToAddress(cfg.privateKeyECDSA.PublicKey) @@ -199,14 +195,14 @@ func OPCM(ctx context.Context, cfg OPCMConfig) error { } host.SetNonce(chainDeployer, nonce) - var release string + var l1ContractsRelease string if cfg.ArtifactsLocator.IsTag() { - release = cfg.ArtifactsLocator.Tag + l1ContractsRelease = cfg.ArtifactsLocator.Tag } else { - release = "dev" + l1ContractsRelease = "dev" } - lgr.Info("deploying OPCM", "release", release) + lgr.Info("deploying OPCM", "l1ContractsRelease", l1ContractsRelease) // We need to etch the Superchain addresses so that they have nonzero code // and the checks in the OPCM constructor pass. @@ -238,10 +234,9 @@ func OPCM(ctx context.Context, cfg OPCMConfig) error { ProofMaturityDelaySeconds: new(big.Int).SetUint64(cfg.ProofMaturityDelaySeconds), DisputeGameFinalityDelaySeconds: new(big.Int).SetUint64(cfg.DisputeGameFinalityDelaySeconds), MipsVersion: new(big.Int).SetUint64(cfg.MIPSVersion), - Release: release, + L1ContractsRelease: l1ContractsRelease, SuperchainConfigProxy: superchainConfigAddr, ProtocolVersionsProxy: protocolVersionsAddr, - OpcmProxyOwner: opcmProxyOwnerAddr, StandardVersionsToml: standardVersionsTOML, UseInterop: false, }, diff --git a/op-deployer/pkg/deployer/broadcaster/gas_estimator.go b/op-deployer/pkg/deployer/broadcaster/gas_estimator.go index abe76d027ec..b04390fc8aa 100644 --- a/op-deployer/pkg/deployer/broadcaster/gas_estimator.go +++ b/op-deployer/pkg/deployer/broadcaster/gas_estimator.go @@ -11,15 +11,20 @@ import ( var ( // baseFeePadFactor = 50% as a divisor baseFeePadFactor = big.NewInt(2) - // tipMulFactor = 20 as a multiplier - tipMulFactor = big.NewInt(20) + 
// tipMulFactor = 5 as a multiplier + tipMulFactor = big.NewInt(5) // dummyBlobFee is a dummy value for the blob fee. Since this gas estimator will never // post blobs, it's just set to 1. dummyBlobFee = big.NewInt(1) + // maxTip is the maximum tip that can be suggested by this estimator. + maxTip = big.NewInt(50 * 1e9) + // minTip is the minimum tip that can be suggested by this estimator. + minTip = big.NewInt(1 * 1e9) ) // DeployerGasPriceEstimator is a custom gas price estimator for use with op-deployer. -// It pads the base fee by 50% and multiplies the suggested tip by 20. +// It pads the base fee by 50% and multiplies the suggested tip by 5 up to a max of +// 50 gwei. func DeployerGasPriceEstimator(ctx context.Context, client txmgr.ETHBackend) (*big.Int, *big.Int, *big.Int, error) { chainHead, err := client.HeaderByNumber(ctx, nil) if err != nil { @@ -34,5 +39,14 @@ func DeployerGasPriceEstimator(ctx context.Context, client txmgr.ETHBackend) (*b baseFeePad := new(big.Int).Div(chainHead.BaseFee, baseFeePadFactor) paddedBaseFee := new(big.Int).Add(chainHead.BaseFee, baseFeePad) paddedTip := new(big.Int).Mul(tip, tipMulFactor) + + if paddedTip.Cmp(minTip) < 0 { + paddedTip.Set(minTip) + } + + if paddedTip.Cmp(maxTip) > 0 { + paddedTip.Set(maxTip) + } + return paddedTip, paddedBaseFee, dummyBlobFee, nil } diff --git a/op-deployer/pkg/deployer/broadcaster/keyed.go b/op-deployer/pkg/deployer/broadcaster/keyed.go index f5797d93915..c9bb27fcf0c 100644 --- a/op-deployer/pkg/deployer/broadcaster/keyed.go +++ b/op-deployer/pkg/deployer/broadcaster/keyed.go @@ -90,6 +90,9 @@ func NewKeyedBroadcaster(cfg KeyedBroadcasterOpts) (*KeyedBroadcaster, error) { } func (t *KeyedBroadcaster) Hook(bcast script.Broadcast) { + if bcast.Type != script.BroadcastCreate2 && bcast.From != t.mgr.From() { + panic(fmt.Sprintf("invalid from for broadcast:%v, expected:%v", bcast.From, t.mgr.From())) + } t.mtx.Lock() t.bcasts = append(t.bcasts, bcast) t.mtx.Unlock() diff --git 
a/op-deployer/pkg/deployer/inspect/l1.go b/op-deployer/pkg/deployer/inspect/l1.go index d0a4f88172d..4883e83486c 100644 --- a/op-deployer/pkg/deployer/inspect/l1.go +++ b/op-deployer/pkg/deployer/inspect/l1.go @@ -45,7 +45,7 @@ type OpChainDeployment struct { } type ImplementationsDeployment struct { - OpcmProxyAddress common.Address `json:"opcmProxyAddress"` + OpcmAddress common.Address `json:"opcmAddress"` DelayedWETHImplAddress common.Address `json:"delayedWETHImplAddress"` OptimismPortalImplAddress common.Address `json:"optimismPortalImplAddress"` PreimageOracleSingletonAddress common.Address `json:"preimageOracleSingletonAddress"` @@ -113,7 +113,7 @@ func L1(globalState *state.State, chainID common.Hash) (*L1Contracts, error) { // DelayedWETHPermissionlessGameProxyAddress: chainState.DelayedWETHPermissionlessGameProxyAddress, }, ImplementationsDeployment: ImplementationsDeployment{ - OpcmProxyAddress: globalState.ImplementationsDeployment.OpcmProxyAddress, + OpcmAddress: globalState.ImplementationsDeployment.OpcmAddress, DelayedWETHImplAddress: globalState.ImplementationsDeployment.DelayedWETHImplAddress, OptimismPortalImplAddress: globalState.ImplementationsDeployment.OptimismPortalImplAddress, PreimageOracleSingletonAddress: globalState.ImplementationsDeployment.PreimageOracleSingletonAddress, diff --git a/op-deployer/pkg/deployer/inspect/semvers.go b/op-deployer/pkg/deployer/inspect/semvers.go index da666096ee1..48e16d21dbc 100644 --- a/op-deployer/pkg/deployer/inspect/semvers.go +++ b/op-deployer/pkg/deployer/inspect/semvers.go @@ -8,6 +8,10 @@ import ( "regexp" "time" + "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" @@ -25,8 +29,6 @@ import ( "github.com/urfave/cli/v2" ) -var versionSelector = 
[]byte{0x54, 0xfd, 0x4d, 0x50} - func L2SemversCLI(cliCtx *cli.Context) error { cliCfg, err := readConfig(cliCtx) if err != nil { @@ -67,6 +69,60 @@ func L2SemversCLI(cliCtx *cli.Context) error { } }() + ps, err := L2Semvers(L2SemversConfig{ + Lgr: l, + Artifacts: artifactsFS, + ChainState: chainState, + }) + if err != nil { + return fmt.Errorf("failed to get L2 semvers: %w", err) + } + + if err := jsonutil.WriteJSON(ps, ioutil.ToStdOutOrFileOrNoop(cliCfg.Outfile, 0o666)); err != nil { + return fmt.Errorf("failed to write rollup config: %w", err) + } + + return nil +} + +type L2SemversConfig struct { + Lgr log.Logger + Artifacts foundry.StatDirFs + ChainState *state.ChainState +} + +type L2PredeploySemvers struct { + L2ToL1MessagePasser string + DeployerWhitelist string + WETH string + L2CrossDomainMessenger string + L2StandardBridge string + SequencerFeeVault string + OptimismMintableERC20Factory string + L1BlockNumber string + GasPriceOracle string + L1Block string + LegacyMessagePasser string + L2ERC721Bridge string + OptimismMintableERC721Factory string + BaseFeeVault string + L1FeeVault string + SchemaRegistry string + EAS string + CrossL2Inbox string + L2toL2CrossDomainMessenger string + SuperchainWETH string + ETHLiquidity string + SuperchainTokenBridge string + OptimismMintableERC20 string + OptimismMintableERC721 string +} + +func L2Semvers(cfg L2SemversConfig) (*L2PredeploySemvers, error) { + l := cfg.Lgr + artifactsFS := cfg.Artifacts + chainState := cfg.ChainState + host, err := env.DefaultScriptHost( broadcaster.NoopBroadcaster(), l, @@ -74,85 +130,89 @@ func L2SemversCLI(cliCtx *cli.Context) error { artifactsFS, ) if err != nil { - return fmt.Errorf("failed to create script host: %w", err) + return nil, fmt.Errorf("failed to create script host: %w", err) } host.ImportState(chainState.Allocs.Data) - addr := common.Address{19: 0x01} - type contractToCheck struct { - Address common.Address - Name string + Address common.Address + FieldPtr *string + Name 
string } - contractsOutput := make(map[string]string) + var ps L2PredeploySemvers - // The gov token and the proxy admin do not have semvers. contracts := []contractToCheck{ - {predeploys.L2ToL1MessagePasserAddr, "L2ToL1MessagePasser"}, - {predeploys.DeployerWhitelistAddr, "DeployerWhitelist"}, - {predeploys.WETHAddr, "WETH"}, - {predeploys.L2CrossDomainMessengerAddr, "L2CrossDomainMessenger"}, - {predeploys.L2StandardBridgeAddr, "L2StandardBridge"}, - {predeploys.SequencerFeeVaultAddr, "SequencerFeeVault"}, - {predeploys.OptimismMintableERC20FactoryAddr, "OptimismMintableERC20Factory"}, - {predeploys.L1BlockNumberAddr, "L1BlockNumber"}, - {predeploys.GasPriceOracleAddr, "GasPriceOracle"}, - {predeploys.L1BlockAddr, "L1Block"}, - {predeploys.LegacyMessagePasserAddr, "LegacyMessagePasser"}, - {predeploys.L2ERC721BridgeAddr, "L2ERC721Bridge"}, - {predeploys.OptimismMintableERC721FactoryAddr, "OptimismMintableERC721Factory"}, - {predeploys.BaseFeeVaultAddr, "BaseFeeVault"}, - {predeploys.L1FeeVaultAddr, "L1FeeVault"}, - {predeploys.SchemaRegistryAddr, "SchemaRegistry"}, - {predeploys.EASAddr, "EAS"}, - {predeploys.WETHAddr, "WETH"}, + {predeploys.L2ToL1MessagePasserAddr, &ps.L2ToL1MessagePasser, "L2ToL1MessagePasser"}, + {predeploys.DeployerWhitelistAddr, &ps.DeployerWhitelist, "DeployerWhitelist"}, + {predeploys.WETHAddr, &ps.WETH, "WETH"}, + {predeploys.L2CrossDomainMessengerAddr, &ps.L2CrossDomainMessenger, "L2CrossDomainMessenger"}, + {predeploys.L2StandardBridgeAddr, &ps.L2StandardBridge, "L2StandardBridge"}, + {predeploys.SequencerFeeVaultAddr, &ps.SequencerFeeVault, "SequencerFeeVault"}, + {predeploys.OptimismMintableERC20FactoryAddr, &ps.OptimismMintableERC20Factory, "OptimismMintableERC20Factory"}, + {predeploys.L1BlockNumberAddr, &ps.L1BlockNumber, "L1BlockNumber"}, + {predeploys.GasPriceOracleAddr, &ps.GasPriceOracle, "GasPriceOracle"}, + {predeploys.L1BlockAddr, &ps.L1Block, "L1Block"}, + {predeploys.LegacyMessagePasserAddr, &ps.LegacyMessagePasser, 
"LegacyMessagePasser"}, + {predeploys.L2ERC721BridgeAddr, &ps.L2ERC721Bridge, "L2ERC721Bridge"}, + {predeploys.OptimismMintableERC721FactoryAddr, &ps.OptimismMintableERC721Factory, "OptimismMintableERC721Factory"}, + {predeploys.BaseFeeVaultAddr, &ps.BaseFeeVault, "BaseFeeVault"}, + {predeploys.L1FeeVaultAddr, &ps.L1FeeVault, "L1FeeVault"}, + {predeploys.SchemaRegistryAddr, &ps.SchemaRegistry, "SchemaRegistry"}, + {predeploys.EASAddr, &ps.EAS, "EAS"}, } for _, contract := range contracts { - data, _, err := host.Call( - addr, - contract.Address, - bytes.Clone(versionSelector), - 1_000_000_000, - uint256.NewInt(0), - ) + semver, err := ReadSemver(host, contract.Address) if err != nil { - return fmt.Errorf("failed to call version on %s: %w", contract.Name, err) - } - - // The second 32 bytes contain the length of the string - length := new(big.Int).SetBytes(data[32:64]).Int64() - // Start of the string data (after offset and length) - stringStart := 64 - stringEnd := int64(stringStart) + length - - // Bounds check - if stringEnd > int64(len(data)) { - return fmt.Errorf("string data out of bounds") + return nil, fmt.Errorf("failed to read semver for %s: %w", contract.Name, err) } - contractsOutput[contract.Name] = string(data[stringStart:stringEnd]) + *contract.FieldPtr = semver } erc20Semver, err := findSemverBytecode(host, predeploys.OptimismMintableERC20FactoryAddr) if err == nil { - contractsOutput["OptimismMintableERC20"] = erc20Semver + ps.OptimismMintableERC20 = erc20Semver } else { l.Warn("failed to find semver for OptimismMintableERC20", "err", err) } erc721Semver, err := findSemverBytecode(host, predeploys.OptimismMintableERC721FactoryAddr) if err == nil { - contractsOutput["OptimismMintableERC721"] = erc721Semver + ps.OptimismMintableERC721 = erc721Semver } else { l.Warn("failed to find semver for OptimismMintableERC721", "err", err) } - if err := jsonutil.WriteJSON(contractsOutput, ioutil.ToStdOutOrFileOrNoop(cliCfg.Outfile, 0o666)); err != nil { - return 
fmt.Errorf("failed to write rollup config: %w", err) + return &ps, nil +} + +var versionSelector = []byte{0x54, 0xfd, 0x4d, 0x50} + +func ReadSemver(host *script.Host, addr common.Address) (string, error) { + data, _, err := host.Call( + common.Address{19: 0x01}, + addr, + bytes.Clone(versionSelector), + 1_000_000_000, + uint256.NewInt(0), + ) + if err != nil { + return "", fmt.Errorf("failed to call version on %s: %w", addr, err) } - return nil + // The second 32 bytes contain the length of the string + length := new(big.Int).SetBytes(data[32:64]).Int64() + // Start of the string data (after offset and length) + stringStart := 64 + stringEnd := int64(stringStart) + length + + // Bounds check + if stringEnd > int64(len(data)) { + return "", fmt.Errorf("string data out of bounds") + } + + return string(data[stringStart:stringEnd]), nil } const patternLen = 24 diff --git a/op-deployer/pkg/deployer/inspect/superchain_registry.go b/op-deployer/pkg/deployer/inspect/superchain_registry.go index 7a6fe384db6..e5ee56568b5 100644 --- a/op-deployer/pkg/deployer/inspect/superchain_registry.go +++ b/op-deployer/pkg/deployer/inspect/superchain_registry.go @@ -28,6 +28,10 @@ func SuperchainRegistryCLI(cliCtx *cli.Context) error { return fmt.Errorf("failed to read intent: %w", err) } + if err := globalIntent.Check(); err != nil { + return fmt.Errorf("intent check failed: %w", err) + } + envVars := map[string]string{} envVars["SCR_CHAIN_NAME"] = "" envVars["SCR_CHAIN_SHORT_NAME"] = "" diff --git a/op-deployer/pkg/deployer/integration_test/apply_test.go b/op-deployer/pkg/deployer/integration_test/apply_test.go index 215687c5cfc..f977fbbb0c7 100644 --- a/op-deployer/pkg/deployer/integration_test/apply_test.go +++ b/op-deployer/pkg/deployer/integration_test/apply_test.go @@ -1,12 +1,16 @@ package integration_test import ( + "bufio" "bytes" + "compress/gzip" "context" "crypto/rand" "encoding/hex" + "encoding/json" "fmt" "log/slog" + "maps" "math/big" "os" "testing" @@ -64,6 +68,8 @@ 
network_params: genesis_delay: 0 ` +const defaultL1ChainID uint64 = 77799777 + type deployerKey struct{} func (d *deployerKey) HDPath() string { @@ -94,21 +100,38 @@ func TestEndToEndApply(t *testing.T) { l1Client, err := ethclient.Dial(rpcURL) require.NoError(t, err) - depKey := new(deployerKey) - l1ChainID := big.NewInt(77799777) - dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + pk, err := crypto.HexToECDSA("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80") require.NoError(t, err) - pk, err := dk.Secret(depKey) + + l1ChainID := new(big.Int).SetUint64(defaultL1ChainID) + dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) require.NoError(t, err) l2ChainID1 := uint256.NewInt(1) l2ChainID2 := uint256.NewInt(2) loc, _ := testutil.LocalArtifacts(t) - intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) - cg := ethClientCodeGetter(ctx, l1Client) - t.Run("initial chain", func(t *testing.T) { + t.Run("two chains one after another", func(t *testing.T) { + intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) + cg := ethClientCodeGetter(ctx, l1Client) + + require.NoError(t, deployer.ApplyPipeline( + ctx, + deployer.ApplyPipelineOpts{ + L1RPCUrl: rpcURL, + DeployerPrivateKey: pk, + Intent: intent, + State: st, + Logger: lgr, + StateWriter: pipeline.NoopStateWriter(), + }, + )) + + // create a new environment with wiped state to ensure we can continue using the + // state from the previous deployment + intent.Chains = append(intent.Chains, newChainIntent(t, dk, l1ChainID, l2ChainID2)) + require.NoError(t, deployer.ApplyPipeline( ctx, deployer.ApplyPipelineOpts{ @@ -125,10 +148,12 @@ func TestEndToEndApply(t *testing.T) { validateOPChainDeployment(t, cg, st, intent) }) - t.Run("subsequent chain", func(t *testing.T) { - // create a new environment with wiped state to ensure we can continue using the - // state from the previous deployment - intent.Chains = append(intent.Chains, newChainIntent(t, dk, l1ChainID, 
l2ChainID2)) + t.Run("chain with tagged artifacts", func(t *testing.T) { + intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) + cg := ethClientCodeGetter(ctx, l1Client) + + intent.L1ContractsLocator = artifacts.DefaultL1ContractsLocator + intent.L2ContractsLocator = artifacts.DefaultL2ContractsLocator require.NoError(t, deployer.ApplyPipeline( ctx, @@ -142,14 +167,25 @@ func TestEndToEndApply(t *testing.T) { }, )) + validateSuperchainDeployment(t, st, cg) validateOPChainDeployment(t, cg, st, intent) }) } func TestApplyExistingOPCM(t *testing.T) { + t.Run("mainnet", func(t *testing.T) { + testApplyExistingOPCM(t, 1, os.Getenv("MAINNET_RPC_URL"), standard.L1VersionsMainnet) + }) + t.Run("sepolia", func(t *testing.T) { + testApplyExistingOPCM(t, 11155111, os.Getenv("SEPOLIA_RPC_URL"), standard.L1VersionsSepolia) + }) +} + +func testApplyExistingOPCM(t *testing.T, l1ChainID uint64, forkRPCUrl string, versions standard.L1Versions) { + op_e2e.InitParallel(t) + anvil.Test(t) - forkRPCUrl := os.Getenv("SEPOLIA_RPC_URL") if forkRPCUrl == "" { t.Skip("no fork RPC URL provided") } @@ -173,22 +209,24 @@ func TestApplyExistingOPCM(t *testing.T) { l1Client, err := ethclient.Dial(runner.RPCUrl()) require.NoError(t, err) - l1ChainID := big.NewInt(11155111) + l1ChainIDBig := new(big.Int).SetUint64(l1ChainID) dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) require.NoError(t, err) // index 0 from Anvil's test set pk, err := crypto.HexToECDSA("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80") require.NoError(t, err) - l2ChainID := uint256.NewInt(1) + l2ChainID := uint256.NewInt(777) + // Hardcode the below tags to ensure the test is validating the correct + // version even if the underlying tag changes intent, st := newIntent( t, - l1ChainID, + l1ChainIDBig, dk, l2ChainID, - artifacts.DefaultL1ContractsLocator, - artifacts.DefaultL2ContractsLocator, + artifacts.MustNewLocatorFromTag("op-contracts/v1.6.0"), + 
artifacts.MustNewLocatorFromTag("op-contracts/v1.7.0-beta.1+l2-contracts"), ) // Define a new create2 salt to avoid contract address collisions _, err = rand.Read(st.Create2Salt[:]) @@ -207,6 +245,171 @@ func TestApplyExistingOPCM(t *testing.T) { )) validateOPChainDeployment(t, ethClientCodeGetter(ctx, l1Client), st, intent) + + releases := versions.Releases["op-contracts/v1.6.0"] + + implTests := []struct { + name string + expAddr common.Address + actAddr common.Address + }{ + {"OptimismPortal", releases.OptimismPortal.ImplementationAddress, st.ImplementationsDeployment.OptimismPortalImplAddress}, + {"SystemConfig,", releases.SystemConfig.ImplementationAddress, st.ImplementationsDeployment.SystemConfigImplAddress}, + {"L1CrossDomainMessenger", releases.L1CrossDomainMessenger.ImplementationAddress, st.ImplementationsDeployment.L1CrossDomainMessengerImplAddress}, + {"L1ERC721Bridge", releases.L1ERC721Bridge.ImplementationAddress, st.ImplementationsDeployment.L1ERC721BridgeImplAddress}, + {"L1StandardBridge", releases.L1StandardBridge.ImplementationAddress, st.ImplementationsDeployment.L1StandardBridgeImplAddress}, + {"OptimismMintableERC20Factory", releases.OptimismMintableERC20Factory.ImplementationAddress, st.ImplementationsDeployment.OptimismMintableERC20FactoryImplAddress}, + {"DisputeGameFactory", releases.DisputeGameFactory.ImplementationAddress, st.ImplementationsDeployment.DisputeGameFactoryImplAddress}, + {"MIPS", releases.MIPS.Address, st.ImplementationsDeployment.MipsSingletonAddress}, + {"PreimageOracle", releases.PreimageOracle.Address, st.ImplementationsDeployment.PreimageOracleSingletonAddress}, + {"DelayedWETH", releases.DelayedWETH.ImplementationAddress, st.ImplementationsDeployment.DelayedWETHImplAddress}, + } + for _, tt := range implTests { + require.Equal(t, tt.expAddr, tt.actAddr, "unexpected address for %s", tt.name) + } + + superchain, err := standard.SuperchainFor(l1ChainIDBig.Uint64()) + require.NoError(t, err) + + managerOwner, err := 
standard.ManagerOwnerAddrFor(l1ChainIDBig.Uint64()) + require.NoError(t, err) + + superchainTests := []struct { + name string + expAddr common.Address + actAddr common.Address + }{ + {"ProxyAdmin", managerOwner, st.SuperchainDeployment.ProxyAdminAddress}, + {"SuperchainConfig", common.Address(*superchain.Config.SuperchainConfigAddr), st.SuperchainDeployment.SuperchainConfigProxyAddress}, + {"ProtocolVersions", common.Address(*superchain.Config.ProtocolVersionsAddr), st.SuperchainDeployment.ProtocolVersionsProxyAddress}, + } + for _, tt := range superchainTests { + require.Equal(t, tt.expAddr, tt.actAddr, "unexpected address for %s", tt.name) + } + + artifactsFSL2, cleanupL2, err := artifacts.Download( + ctx, + intent.L2ContractsLocator, + artifacts.LogProgressor(lgr), + ) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, cleanupL2()) + }) + + chainState := st.Chains[0] + chainIntent := intent.Chains[0] + + semvers, err := inspect.L2Semvers(inspect.L2SemversConfig{ + Lgr: lgr, + Artifacts: artifactsFSL2, + ChainState: chainState, + }) + require.NoError(t, err) + + expectedSemversL2 := &inspect.L2PredeploySemvers{ + L2ToL1MessagePasser: "1.1.1-beta.1", + DeployerWhitelist: "1.1.1-beta.1", + WETH: "1.0.0-beta.1", + L2CrossDomainMessenger: "2.1.1-beta.1", + L2StandardBridge: "1.11.1-beta.1", + SequencerFeeVault: "1.5.0-beta.2", + OptimismMintableERC20Factory: "1.10.1-beta.2", + L1BlockNumber: "1.1.1-beta.1", + GasPriceOracle: "1.3.1-beta.1", + L1Block: "1.5.1-beta.1", + LegacyMessagePasser: "1.1.1-beta.1", + L2ERC721Bridge: "1.7.1-beta.2", + OptimismMintableERC721Factory: "1.4.1-beta.1", + BaseFeeVault: "1.5.0-beta.2", + L1FeeVault: "1.5.0-beta.2", + SchemaRegistry: "1.3.1-beta.1", + EAS: "1.4.1-beta.1", + CrossL2Inbox: "", + L2toL2CrossDomainMessenger: "", + SuperchainWETH: "", + ETHLiquidity: "", + SuperchainTokenBridge: "", + OptimismMintableERC20: "1.4.0-beta.1", + OptimismMintableERC721: "1.3.1-beta.1", + } + + require.EqualValues(t, 
expectedSemversL2, semvers) + + f, err := os.Open(fmt.Sprintf("./testdata/allocs-l2-v160-%d.json.gz", l1ChainID)) + require.NoError(t, err) + defer f.Close() + gzr, err := gzip.NewReader(f) + require.NoError(t, err) + defer gzr.Close() + dec := json.NewDecoder(bufio.NewReader(gzr)) + var expAllocs types.GenesisAlloc + require.NoError(t, dec.Decode(&expAllocs)) + + type storageCheckerFunc func(addr common.Address, actStorage map[common.Hash]common.Hash) + + storageDiff := func(addr common.Address, expStorage, actStorage map[common.Hash]common.Hash) { + require.EqualValues(t, expStorage, actStorage, "storage for %s differs", addr) + } + + defaultStorageChecker := func(addr common.Address, actStorage map[common.Hash]common.Hash) { + storageDiff(addr, expAllocs[addr].Storage, actStorage) + } + + overrideStorageChecker := func(addr common.Address, actStorage, overrides map[common.Hash]common.Hash) { + expStorage := make(map[common.Hash]common.Hash) + maps.Copy(expStorage, expAllocs[addr].Storage) + maps.Copy(expStorage, overrides) + storageDiff(addr, expStorage, actStorage) + } + + storageCheckers := map[common.Address]storageCheckerFunc{ + predeploys.L2CrossDomainMessengerAddr: func(addr common.Address, actStorage map[common.Hash]common.Hash) { + overrideStorageChecker(addr, actStorage, map[common.Hash]common.Hash{ + {31: 0xcf}: common.BytesToHash(chainState.L1CrossDomainMessengerProxyAddress.Bytes()), + }) + }, + predeploys.L2StandardBridgeAddr: func(addr common.Address, actStorage map[common.Hash]common.Hash) { + overrideStorageChecker(addr, actStorage, map[common.Hash]common.Hash{ + {31: 0x04}: common.BytesToHash(chainState.L1StandardBridgeProxyAddress.Bytes()), + }) + }, + predeploys.L2ERC721BridgeAddr: func(addr common.Address, actStorage map[common.Hash]common.Hash) { + overrideStorageChecker(addr, actStorage, map[common.Hash]common.Hash{ + {31: 0x02}: common.BytesToHash(chainState.L1ERC721BridgeProxyAddress.Bytes()), + }) + }, + predeploys.ProxyAdminAddr: 
func(addr common.Address, actStorage map[common.Hash]common.Hash) { + overrideStorageChecker(addr, actStorage, map[common.Hash]common.Hash{ + {}: common.BytesToHash(intent.Chains[0].Roles.L2ProxyAdminOwner.Bytes()), + }) + }, + // The ProxyAdmin owner is also set on the ProxyAdmin contract's implementation address, see + // L2Genesis.s.sol line 292. + common.HexToAddress("0xc0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d30018"): func(addr common.Address, actStorage map[common.Hash]common.Hash) { + overrideStorageChecker(addr, actStorage, map[common.Hash]common.Hash{ + {}: common.BytesToHash(chainIntent.Roles.L2ProxyAdminOwner.Bytes()), + }) + }, + } + + //Use a custom equality function to compare the genesis allocs + //because the reflect-based one is really slow + actAllocs := st.Chains[0].Allocs.Data.Accounts + require.Equal(t, len(expAllocs), len(actAllocs)) + for addr, expAcc := range expAllocs { + actAcc, ok := actAllocs[addr] + require.True(t, ok) + require.True(t, expAcc.Balance.Cmp(actAcc.Balance) == 0, "balance for %s differs", addr) + require.Equal(t, expAcc.Nonce, actAcc.Nonce, "nonce for %s differs", addr) + require.Equal(t, hex.EncodeToString(expAllocs[addr].Code), hex.EncodeToString(actAcc.Code), "code for %s differs", addr) + + storageChecker, ok := storageCheckers[addr] + if !ok { + storageChecker = defaultStorageChecker + } + storageChecker(addr, actAcc.Storage) + } } func TestL2BlockTimeOverride(t *testing.T) { @@ -216,7 +419,7 @@ func TestL2BlockTimeOverride(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opts, intent, st := setupGenesisChain(t) + opts, intent, st := setupGenesisChain(t, defaultL1ChainID) intent.GlobalDeployOverrides = map[string]interface{}{ "l2BlockTime": float64(3), } @@ -234,7 +437,7 @@ func TestApplyGenesisStrategy(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opts, intent, st := setupGenesisChain(t) + opts, intent, st := setupGenesisChain(t, 
defaultL1ChainID) require.NoError(t, deployer.ApplyPipeline(ctx, opts)) @@ -254,7 +457,7 @@ func TestProofParamOverrides(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opts, intent, st := setupGenesisChain(t) + opts, intent, st := setupGenesisChain(t, defaultL1ChainID) intent.GlobalDeployOverrides = map[string]any{ "withdrawalDelaySeconds": standard.WithdrawalDelaySeconds + 1, "minProposalSizeBytes": standard.MinProposalSizeBytes + 1, @@ -351,7 +554,7 @@ func TestInteropDeployment(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opts, intent, st := setupGenesisChain(t) + opts, intent, st := setupGenesisChain(t, defaultL1ChainID) intent.UseInterop = true require.NoError(t, deployer.ApplyPipeline(ctx, opts)) @@ -369,7 +572,7 @@ func TestAltDADeployment(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opts, intent, st := setupGenesisChain(t) + opts, intent, st := setupGenesisChain(t, defaultL1ChainID) altDACfg := genesis.AltDADeployConfig{ UseAltDA: true, DACommitmentType: altda.KeccakCommitmentString, @@ -447,7 +650,7 @@ func TestInvalidL2Genesis(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - opts, intent, _ := setupGenesisChain(t) + opts, intent, _ := setupGenesisChain(t, defaultL1ChainID) intent.DeploymentStrategy = state.DeploymentStrategyGenesis intent.GlobalDeployOverrides = tt.overrides @@ -458,11 +661,11 @@ func TestInvalidL2Genesis(t *testing.T) { } } -func setupGenesisChain(t *testing.T) (deployer.ApplyPipelineOpts, *state.Intent, *state.State) { +func setupGenesisChain(t *testing.T, l1ChainID uint64) (deployer.ApplyPipelineOpts, *state.Intent, *state.State) { lgr := testlog.Logger(t, slog.LevelDebug) depKey := new(deployerKey) - l1ChainID := big.NewInt(77799777) + l1ChainIDBig := new(big.Int).SetUint64(l1ChainID) dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) require.NoError(t, err) @@ 
-473,8 +676,8 @@ func setupGenesisChain(t *testing.T) (deployer.ApplyPipelineOpts, *state.Intent, loc, _ := testutil.LocalArtifacts(t) - intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) - intent.Chains = append(intent.Chains, newChainIntent(t, dk, l1ChainID, l2ChainID1)) + intent, st := newIntent(t, l1ChainIDBig, dk, l2ChainID1, loc, loc) + intent.Chains = append(intent.Chains, newChainIntent(t, dk, l1ChainIDBig, l2ChainID1)) intent.DeploymentStrategy = state.DeploymentStrategyGenesis opts := deployer.ApplyPipelineOpts{ @@ -571,7 +774,7 @@ func validateSuperchainDeployment(t *testing.T, st *state.State, cg codeGetter) {"SuperchainConfigImpl", st.SuperchainDeployment.SuperchainConfigImplAddress}, {"ProtocolVersionsProxy", st.SuperchainDeployment.ProtocolVersionsProxyAddress}, {"ProtocolVersionsImpl", st.SuperchainDeployment.ProtocolVersionsImplAddress}, - {"OpcmProxy", st.ImplementationsDeployment.OpcmProxyAddress}, + {"Opcm", st.ImplementationsDeployment.OpcmAddress}, {"PreimageOracleSingleton", st.ImplementationsDeployment.PreimageOracleSingletonAddress}, {"MipsSingleton", st.ImplementationsDeployment.MipsSingletonAddress}, } diff --git a/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-1.json.gz b/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-1.json.gz new file mode 100644 index 00000000000..7a5450d6419 Binary files /dev/null and b/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-1.json.gz differ diff --git a/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-11155111.json.gz b/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-11155111.json.gz new file mode 100644 index 00000000000..2fed12ccec4 Binary files /dev/null and b/op-deployer/pkg/deployer/integration_test/testdata/allocs-l2-v160-11155111.json.gz differ diff --git a/op-deployer/pkg/deployer/opcm/contract.go b/op-deployer/pkg/deployer/opcm/contract.go index b90db581419..8d02f77e8e0 100644 --- 
a/op-deployer/pkg/deployer/opcm/contract.go +++ b/op-deployer/pkg/deployer/opcm/contract.go @@ -48,57 +48,6 @@ func (c *Contract) GenericAddressGetter(ctx context.Context, functionName string return c.callContractMethod(ctx, functionName, abi.Arguments{}) } -// GetImplementation retrieves the Implementation struct for a given release and contract name. -func (c *Contract) GetOPCMImplementationAddress(ctx context.Context, release, contractName string) (common.Address, error) { - methodName := "implementations" - method := abi.NewMethod( - methodName, - methodName, - abi.Function, - "view", - true, - false, - abi.Arguments{ - {Name: "release", Type: mustType("string")}, - {Name: "contractName", Type: mustType("string")}, - }, - abi.Arguments{ - {Name: "logic", Type: mustType("address")}, - {Name: "initializer", Type: mustType("bytes4")}, - }, - ) - - calldata, err := method.Inputs.Pack(release, contractName) - if err != nil { - return common.Address{}, fmt.Errorf("failed to pack inputs: %w", err) - } - - msg := ethereum.CallMsg{ - To: &c.addr, - Data: append(bytes.Clone(method.ID), calldata...), - } - - result, err := c.client.CallContract(ctx, msg, nil) - if err != nil { - return common.Address{}, fmt.Errorf("failed to call contract: %w", err) - } - - out, err := method.Outputs.Unpack(result) - if err != nil { - return common.Address{}, fmt.Errorf("failed to unpack result: %w", err) - } - if len(out) != 2 { - return common.Address{}, fmt.Errorf("unexpected output length: %d", len(out)) - } - - logic, ok := out[0].(common.Address) - if !ok { - return common.Address{}, fmt.Errorf("unexpected type for logic: %T", out[0]) - } - - return logic, nil -} - func (c *Contract) callContractMethod(ctx context.Context, methodName string, inputs abi.Arguments, args ...interface{}) (common.Address, error) { method := abi.NewMethod( methodName, diff --git a/op-deployer/pkg/deployer/opcm/implementations.go b/op-deployer/pkg/deployer/opcm/implementations.go index 
8dd072eef24..413452b1d34 100644 --- a/op-deployer/pkg/deployer/opcm/implementations.go +++ b/op-deployer/pkg/deployer/opcm/implementations.go @@ -18,12 +18,11 @@ type DeployImplementationsInput struct { DisputeGameFinalityDelaySeconds *big.Int MipsVersion *big.Int // Release version to set OPCM implementations for, of the format `op-contracts/vX.Y.Z`. - Release string + L1ContractsRelease string SuperchainConfigProxy common.Address ProtocolVersionsProxy common.Address UseInterop bool // if true, deploy Interop implementations - OpcmProxyOwner common.Address StandardVersionsToml string // contents of 'standard-versions-mainnet.toml' or 'standard-versions-sepolia.toml' file } @@ -32,8 +31,7 @@ func (input *DeployImplementationsInput) InputSet() bool { } type DeployImplementationsOutput struct { - OpcmProxy common.Address - OpcmImpl common.Address + Opcm common.Address DelayedWETHImpl common.Address OptimismPortalImpl common.Address PreimageOracleSingleton common.Address diff --git a/op-deployer/pkg/deployer/opcm/opchain.go b/op-deployer/pkg/deployer/opcm/opchain.go index 1e1b468a417..8c7e60fec4d 100644 --- a/op-deployer/pkg/deployer/opcm/opchain.go +++ b/op-deployer/pkg/deployer/opcm/opchain.go @@ -26,7 +26,7 @@ type DeployOPChainInputV160 struct { BasefeeScalar uint32 BlobBaseFeeScalar uint32 L2ChainId *big.Int - OpcmProxy common.Address + Opcm common.Address SaltMixer string GasLimit uint64 @@ -122,8 +122,8 @@ func deployOPChain[T any](host *script.Host, input T) (DeployOPChainOutput, erro type ReadImplementationAddressesInput struct { DeployOPChainOutput - OpcmProxy common.Address - Release string + Opcm common.Address + Release string } type ReadImplementationAddressesOutput struct { diff --git a/op-deployer/pkg/deployer/pipeline/alt_da.go b/op-deployer/pkg/deployer/pipeline/alt_da.go index 62796832c93..b3641262094 100644 --- a/op-deployer/pkg/deployer/pipeline/alt_da.go +++ b/op-deployer/pkg/deployer/pipeline/alt_da.go @@ -31,7 +31,7 @@ func DeployAltDA(env 
*Env, intent *state.Intent, st *state.State, chainID common lgr.Info("deploying alt-da contracts") dao, err = opcm.DeployAltDA(env.L1ScriptHost, opcm.DeployAltDAInput{ Salt: st.Create2Salt, - ProxyAdmin: st.ImplementationsDeployment.OpcmProxyAddress, + ProxyAdmin: chainState.ProxyAdminAddress, ChallengeContractOwner: chainIntent.Roles.L1ProxyAdminOwner, ChallengeWindow: new(big.Int).SetUint64(chainIntent.DangerousAltDAConfig.DAChallengeWindow), ResolveWindow: new(big.Int).SetUint64(chainIntent.DangerousAltDAConfig.DAResolveWindow), diff --git a/op-deployer/pkg/deployer/pipeline/implementations.go b/op-deployer/pkg/deployer/pipeline/implementations.go index 47ea91fbe96..c2d409b5c3a 100644 --- a/op-deployer/pkg/deployer/pipeline/implementations.go +++ b/op-deployer/pkg/deployer/pipeline/implementations.go @@ -35,10 +35,12 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro var err error if intent.L1ContractsLocator.IsTag() && intent.DeploymentStrategy == state.DeploymentStrategyLive { standardVersionsTOML, err = standard.L1VersionsDataFor(intent.L1ChainID) - if err != nil { - return fmt.Errorf("error getting standard versions TOML: %w", err) + if err == nil { + contractsRelease = intent.L1ContractsLocator.Tag + } else { + contractsRelease = "dev" } - contractsRelease = intent.L1ContractsLocator.Tag + } else { contractsRelease = "dev" } @@ -68,10 +70,9 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro ProofMaturityDelaySeconds: new(big.Int).SetUint64(proofParams.ProofMaturityDelaySeconds), DisputeGameFinalityDelaySeconds: new(big.Int).SetUint64(proofParams.DisputeGameFinalityDelaySeconds), MipsVersion: new(big.Int).SetUint64(proofParams.MIPSVersion), - Release: contractsRelease, + L1ContractsRelease: contractsRelease, SuperchainConfigProxy: st.SuperchainDeployment.SuperchainConfigProxyAddress, ProtocolVersionsProxy: st.SuperchainDeployment.ProtocolVersionsProxyAddress, - OpcmProxyOwner: 
st.SuperchainDeployment.ProxyAdminAddress, StandardVersionsToml: standardVersionsTOML, UseInterop: intent.UseInterop, }, @@ -81,7 +82,7 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro } st.ImplementationsDeployment = &state.ImplementationsDeployment{ - OpcmProxyAddress: dio.OpcmProxy, + OpcmAddress: dio.Opcm, DelayedWETHImplAddress: dio.DelayedWETHImpl, OptimismPortalImplAddress: dio.OptimismPortalImpl, PreimageOracleSingletonAddress: dio.PreimageOracleSingleton, diff --git a/op-deployer/pkg/deployer/pipeline/init.go b/op-deployer/pkg/deployer/pipeline/init.go index f8b12e36a43..88caf760f41 100644 --- a/op-deployer/pkg/deployer/pipeline/init.go +++ b/op-deployer/pkg/deployer/pipeline/init.go @@ -1,11 +1,15 @@ package pipeline import ( + "bufio" "context" "crypto/rand" "fmt" + "os" + "strings" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" + "github.com/mattn/go-isatty" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" @@ -26,7 +30,11 @@ func InitLiveStrategy(ctx context.Context, env *Env, intent *state.Intent, st *s return err } - if intent.L1ContractsLocator.IsTag() { + opcmAddress, opcmAddrErr := standard.ManagerImplementationAddrFor(intent.L1ChainID) + hasPredeployedOPCM := opcmAddrErr == nil + isTag := intent.L1ContractsLocator.IsTag() + + if isTag && hasPredeployedOPCM { superCfg, err := standard.SuperchainFor(intent.L1ChainID) if err != nil { return fmt.Errorf("error getting superchain config: %w", err) @@ -45,12 +53,12 @@ func InitLiveStrategy(ctx context.Context, env *Env, intent *state.Intent, st *s SuperchainConfigProxyAddress: common.Address(*superCfg.Config.SuperchainConfigAddr), } - opcmProxy, err := standard.ManagerImplementationAddrFor(intent.L1ChainID) - if err != nil { - return fmt.Errorf("error getting OPCM proxy address: %w", err) - } st.ImplementationsDeployment = &state.ImplementationsDeployment{ - OpcmProxyAddress: opcmProxy, + OpcmAddress: opcmAddress, + } + } 
else if isTag && !hasPredeployedOPCM { + if err := displayWarning(); err != nil { + return err } } @@ -127,3 +135,38 @@ func InitGenesisStrategy(env *Env, intent *state.Intent, st *state.State) error func immutableErr(field string, was, is any) error { return fmt.Errorf("%s is immutable: was %v, is %v", field, was, is) } + +func displayWarning() error { + warning := strings.TrimPrefix(` +####################### WARNING! WARNING WARNING! ####################### + +You are deploying a tagged release to a chain with no pre-deployed OPCM. +The contracts you are deploying may not be audited, or match a governance +approved release. + +USE OF THIS DEPLOYMENT IS NOT RECOMMENDED FOR PRODUCTION. USE AT YOUR OWN +RISK. BUGS OR LOSS OF FUNDS MAY OCCUR. WE HOPE YOU KNOW WHAT YOU ARE +DOING. + +####################### WARNING! WARNING WARNING! ####################### +`, "\n") + + _, _ = fmt.Fprint(os.Stderr, warning) + + if isatty.IsTerminal(os.Stdout.Fd()) { + _, _ = fmt.Fprintf(os.Stderr, "Please confirm that you have read and understood the warning above [y/n]: ") + + reader := bufio.NewReader(os.Stdin) + input, err := reader.ReadString('\n') + if err != nil { + return fmt.Errorf("failed to read input: %w", err) + } + + input = strings.ToLower(strings.TrimSpace(input)) + if input != "y" && input != "yes" { + return fmt.Errorf("aborted") + } + } + + return nil +} diff --git a/op-deployer/pkg/deployer/pipeline/opchain.go b/op-deployer/pkg/deployer/pipeline/opchain.go index a49848883ce..90cc665e077 100644 --- a/op-deployer/pkg/deployer/pipeline/opchain.go +++ b/op-deployer/pkg/deployer/pipeline/opchain.go @@ -34,7 +34,7 @@ func DeployOPChain(env *Env, intent *state.Intent, st *state.State, chainID comm return opcm.DeployOPChainOutput{}, fmt.Errorf("error making deploy OP chain input: %w", err) } - opcmAddr = input.OpcmProxy + opcmAddr = input.Opcm return opcm.DeployOPChainV160(env.L1ScriptHost, input) } default: @@ -44,7 +44,7 @@ func DeployOPChain(env *Env, intent 
*state.Intent, st *state.State, chainID comm return opcm.DeployOPChainOutput{}, fmt.Errorf("error making deploy OP chain input: %w", err) } - opcmAddr = input.OpcmProxy + opcmAddr = input.Opcm return opcm.DeployOPChainIsthmus(env.L1ScriptHost, input) } } @@ -67,7 +67,7 @@ func DeployOPChain(env *Env, intent *state.Intent, st *state.State, chainID comm readInput := opcm.ReadImplementationAddressesInput{ DeployOPChainOutput: dco, - OpcmProxy: opcmAddr, + Opcm: opcmAddr, Release: release, } impls, err := opcm.ReadImplementationAddresses(env.L1ScriptHost, readInput) @@ -126,7 +126,7 @@ func makeDCIV160(intent *state.Intent, thisIntent *state.ChainIntent, chainID co BasefeeScalar: standard.BasefeeScalar, BlobBaseFeeScalar: standard.BlobBaseFeeScalar, L2ChainId: chainID.Big(), - OpcmProxy: st.ImplementationsDeployment.OpcmProxyAddress, + Opcm: st.ImplementationsDeployment.OpcmAddress, SaltMixer: st.Create2Salt.String(), // passing through salt generated at state initialization GasLimit: standard.GasLimit, DisputeGameType: proofParams.DisputeGameType, diff --git a/op-deployer/pkg/deployer/standard/standard.go b/op-deployer/pkg/deployer/standard/standard.go index 2fb6de8c5bf..75902424ac7 100644 --- a/op-deployer/pkg/deployer/standard/standard.go +++ b/op-deployer/pkg/deployer/standard/standard.go @@ -133,11 +133,15 @@ func CommitForDeployTag(tag string) (string, error) { func ManagerImplementationAddrFor(chainID uint64) (common.Address, error) { switch chainID { case 1: - // Generated using the bootstrap command on 10/18/2024. - return common.HexToAddress("0x18cec91779995ad14c880e4095456b9147160790"), nil + // Generated using the bootstrap command on 11/18/2024. + // Verified against compiled bytecode at: + // https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts-v160-artifacts-opcm-redesign-backport + return common.HexToAddress("0x9BC0A1eD534BFb31a6Be69e5b767Cba332f14347"), nil case 11155111: - // Generated using the bootstrap command on 10/18/2024. 
- return common.HexToAddress("0xf564eea7960ea244bfebcbbb17858748606147bf"), nil + // Generated using the bootstrap command on 11/18/2024. + // Verified against compiled bytecode at: + // https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts-v160-artifacts-opcm-redesign-backport + return common.HexToAddress("0x760B1d2Dc68DC51fb6E8B2b8722B8ed08903540c"), nil default: return common.Address{}, fmt.Errorf("unsupported chain ID: %d", chainID) } @@ -172,7 +176,7 @@ func SystemOwnerAddrFor(chainID uint64) (common.Address, error) { func ArtifactsURLForTag(tag string) (*url.URL, error) { switch tag { case "op-contracts/v1.6.0": - return url.Parse(standardArtifactsURL("3a27c6dc0cb61b36feaac26def98c64b4a48ec8f5c5ba6965e8ae3157606043c")) + return url.Parse(standardArtifactsURL("e1f0c4020618c4a98972e7124c39686cab2e31d5d7846f9ce5e0d5eed0f5ff32")) case "op-contracts/v1.7.0-beta.1+l2-contracts": return url.Parse(standardArtifactsURL("b0fb1f6f674519d637cff39a22187a5993d7f81a6d7b7be6507a0b50a5e38597")) default: @@ -180,6 +184,17 @@ func ArtifactsURLForTag(tag string) (*url.URL, error) { } } +func ArtifactsHashForTag(tag string) (common.Hash, error) { + switch tag { + case "op-contracts/v1.6.0": + return common.HexToHash("d20a930cc0ff204c2d93b7aa60755ec7859ba4f328b881f5090c6a6a2a86dcba"), nil + case "op-contracts/v1.7.0-beta.1+l2-contracts": + return common.HexToHash("9e3ad322ec9b2775d59143ce6874892f9b04781742c603ad59165159e90b00b9"), nil + default: + return common.Hash{}, fmt.Errorf("unsupported tag: %s", tag) + } +} + func standardArtifactsURL(checksum string) string { return fmt.Sprintf("https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-%s.tar.gz", checksum) } diff --git a/op-deployer/pkg/deployer/state/deploy_config.go b/op-deployer/pkg/deployer/state/deploy_config.go index 1a03c21d7e9..11445c2fb69 100644 --- a/op-deployer/pkg/deployer/state/deploy_config.go +++ b/op-deployer/pkg/deployer/state/deploy_config.go @@ -63,6 +63,11 @@ func 
CombineDeployConfig(intent *Intent, chainIntent *ChainIntent, state *State, EIP1559DenominatorCanyon: 250, EIP1559Elasticity: chainIntent.Eip1559Elasticity, }, + + // STOP! This struct sets the _default_ upgrade schedule for all chains. + // Any upgrades you enable here will be enabled for all new deployments. + // In-development hardforks should never be activated here. Instead, they + // should be specified as overrides. UpgradeScheduleDeployConfig: genesis.UpgradeScheduleDeployConfig{ L2GenesisRegolithTimeOffset: u64UtilPtr(0), L2GenesisCanyonTimeOffset: u64UtilPtr(0), diff --git a/op-deployer/pkg/deployer/state/deploy_config_test.go b/op-deployer/pkg/deployer/state/deploy_config_test.go new file mode 100644 index 00000000000..c0381507d16 --- /dev/null +++ b/op-deployer/pkg/deployer/state/deploy_config_test.go @@ -0,0 +1,47 @@ +package state + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/stretchr/testify/require" +) + +func TestCombineDeployConfig(t *testing.T) { + intent := Intent{ + L1ChainID: 1, + } + chainState := ChainState{ + ID: common.HexToHash("0x123"), + } + chainIntent := ChainIntent{ + Eip1559Denominator: 1, + Eip1559Elasticity: 2, + BaseFeeVaultRecipient: common.HexToAddress("0x123"), + L1FeeVaultRecipient: common.HexToAddress("0x456"), + SequencerFeeVaultRecipient: common.HexToAddress("0x789"), + Roles: ChainRoles{ + SystemConfigOwner: common.HexToAddress("0x123"), + L1ProxyAdminOwner: common.HexToAddress("0x456"), + L2ProxyAdminOwner: common.HexToAddress("0x789"), + UnsafeBlockSigner: common.HexToAddress("0xabc"), + Batcher: common.HexToAddress("0xdef"), + }, + } + state := State{ + SuperchainDeployment: &SuperchainDeployment{ProtocolVersionsProxyAddress: common.HexToAddress("0x123")}, + } + + // apply hard fork overrides + chainIntent.DeployOverrides = map[string]any{ + "l2GenesisGraniteTimeOffset": "0x8", + "l2GenesisHoloceneTimeOffset": "0x10", + } + + out, 
err := CombineDeployConfig(&intent, &chainIntent, &state, &chainState) + require.NoError(t, err) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisFjordTimeOffset, hexutil.Uint64(0)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisGraniteTimeOffset, hexutil.Uint64(8)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisHoloceneTimeOffset, hexutil.Uint64(16)) +} diff --git a/op-deployer/pkg/deployer/state/intent.go b/op-deployer/pkg/deployer/state/intent.go index 860944666e9..86e6cfc1ed4 100644 --- a/op-deployer/pkg/deployer/state/intent.go +++ b/op-deployer/pkg/deployer/state/intent.go @@ -1,6 +1,7 @@ package state import ( + "errors" "fmt" "math/big" @@ -67,11 +68,11 @@ func (c *Intent) Check() error { } if c.L1ContractsLocator == nil { - c.L1ContractsLocator = artifacts.DefaultL1ContractsLocator + return errors.New("l1ContractsLocator must be set") } if c.L2ContractsLocator == nil { - c.L2ContractsLocator = artifacts.DefaultL2ContractsLocator + return errors.New("l2ContractsLocator must be set") } var err error diff --git a/op-deployer/pkg/deployer/state/state.go b/op-deployer/pkg/deployer/state/state.go index e3974fa2a78..e17ed6184c5 100644 --- a/op-deployer/pkg/deployer/state/state.go +++ b/op-deployer/pkg/deployer/state/state.go @@ -64,7 +64,7 @@ type SuperchainDeployment struct { } type ImplementationsDeployment struct { - OpcmProxyAddress common.Address `json:"opcmProxyAddress"` + OpcmAddress common.Address `json:"opcmAddress"` DelayedWETHImplAddress common.Address `json:"delayedWETHImplAddress"` OptimismPortalImplAddress common.Address `json:"optimismPortalImplAddress"` PreimageOracleSingletonAddress common.Address `json:"preimageOracleSingletonAddress"` diff --git a/op-dispute-mon/version/version.go b/op-dispute-mon/version/version.go index 834fc089b19..31ad6f3582a 100644 --- a/op-dispute-mon/version/version.go +++ b/op-dispute-mon/version/version.go @@ 
-1,7 +1,7 @@ package version var ( - Version = "v0.1.0" + Version = "v0.0.0" Meta = "dev" ) diff --git a/op-e2e/bindings/delayedvetoable.go b/op-e2e/bindings/delayedvetoable.go deleted file mode 100644 index 989bb0278d2..00000000000 --- a/op-e2e/bindings/delayedvetoable.go +++ /dev/null @@ -1,928 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package bindings - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription -) - -// DelayedVetoableMetaData contains all meta data concerning the DelayedVetoable contract. 
-var DelayedVetoableMetaData = &bind.MetaData{ - ABI: "[{\"type\":\"constructor\",\"inputs\":[{\"name\":\"vetoer_\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"initiator_\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"target_\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"operatingDelay_\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"fallback\",\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"delay\",\"inputs\":[],\"outputs\":[{\"name\":\"delay_\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"initiator\",\"inputs\":[],\"outputs\":[{\"name\":\"initiator_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"queuedAt\",\"inputs\":[{\"name\":\"callHash\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"outputs\":[{\"name\":\"queuedAt_\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"target\",\"inputs\":[],\"outputs\":[{\"name\":\"target_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"version\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"vetoer\",\"inputs\":[],\"outputs\":[{\"name\":\"vetoer_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"event\",\"name\":\"DelayActivated\",\"inputs\":[{\"name\":\"delay\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Forwarded\",\"inputs\":[{\"name\":\"callHash\",\"type\":\"bytes32\",\"indexed\":true,\"internalType\":\"bytes32\"},{\"name\":\"data\",\"type\":\"bytes\",\"ind
exed\":false,\"internalType\":\"bytes\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Initiated\",\"inputs\":[{\"name\":\"callHash\",\"type\":\"bytes32\",\"indexed\":true,\"internalType\":\"bytes32\"},{\"name\":\"data\",\"type\":\"bytes\",\"indexed\":false,\"internalType\":\"bytes\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Vetoed\",\"inputs\":[{\"name\":\"callHash\",\"type\":\"bytes32\",\"indexed\":true,\"internalType\":\"bytes32\"},{\"name\":\"data\",\"type\":\"bytes\",\"indexed\":false,\"internalType\":\"bytes\"}],\"anonymous\":false},{\"type\":\"error\",\"name\":\"ForwardingEarly\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"Unauthorized\",\"inputs\":[{\"name\":\"expected\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"actual\",\"type\":\"address\",\"internalType\":\"address\"}]}]", - Bin: "0x61010060405234801561001157600080fd5b506040516108ff3803806108ff8339810160408190526100309161006e565b6001600160a01b0393841660a05291831660c05290911660805260e0526100b9565b80516001600160a01b038116811461006957600080fd5b919050565b6000806000806080858703121561008457600080fd5b61008d85610052565b935061009b60208601610052565b92506100a960408601610052565b6060959095015193969295505050565b60805160a05160c05160e0516107dc610123600039600061023f01526000818161015f01528181610205015281816102cd0152818161045801526105050152600081816101a001528181610384015261059d01526000818161057101526105ff01526107dc6000f3fe608060405234801561001057600080fd5b50600436106100725760003560e01c8063b912de5d11610050578063b912de5d14610111578063d4b8399214610124578063d8bff4401461012c57610072565b806354fd4d501461007c5780635c39fcc1146100ce5780636a42b8f8146100fb575b61007a610134565b005b6100b86040518060400160405280600581526020017f312e302e3000000000000000000000000000000000000000000000000000000081525081565b6040516100c591906106a7565b60405180910390f35b6100d66104fb565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100c5565b610103610532565b6040519081526020016100c5565b61010361011f36600461071
a565b610540565b6100d6610567565b6100d6610593565b361580156101425750600054155b15610298573373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016148015906101c357503373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614155b1561023d576040517f295a81c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001660048201523360248201526044015b60405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000060008190556040519081527febf28bfb587e28dfffd9173cf71c32ba5d3f0544a0117b5539c9b274a5bba2a89060200160405180910390a1565b600080366040516102aa929190610733565b60405190819003902090503373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161480156103065750600081815260016020526040902054155b1561036c5760005460000361031e5761031e816105bf565b6000818152600160205260408082204290555182917f87a332a414acbc7da074543639ce7ae02ff1ea72e88379da9f261b080beb5a139161036191903690610743565b60405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000161480156103be575060008181526001602052604090205415155b15610406576000818152600160205260408082208290555182917fbede6852c1d97d93ff557f676de76670cd0dec861e7fe8beb13aa0ba2b0ab0409161036191903690610743565b600081815260016020526040812054900361048b576040517f295a81c100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff7f0000000000000000000000000000000000000000000000000000000000000000166004820152336024820152604401610234565b60008054828252600160205260409091205442916104a891610790565b11156104e0576040517f43dc986d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000818152600160205260408120556104f8816105bf565b50565b60003361052757507f0
00000000000000000000000000000000000000000000000000000000000000090565b61052f610134565b90565b600033610527575060005490565b60003361055a575060009081526001602052604090205490565b610562610134565b919050565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b60003361052757507f000000000000000000000000000000000000000000000000000000000000000090565b807f4c109d85bcd0bb5c735b4be850953d652afe4cd9aa2e0b1426a65a4dcb2e12296000366040516105f2929190610743565b60405180910390a26000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16600036604051610645929190610733565b6000604051808303816000865af19150503d8060008114610682576040519150601f19603f3d011682016040523d82523d6000602084013e610687565b606091505b50909250905081151560010361069f57805160208201f35b805160208201fd5b600060208083528351808285015260005b818110156106d4578581018301518582016040015282016106b8565b818111156106e6576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b60006020828403121561072c57600080fd5b5035919050565b8183823760009101908152919050565b60208152816020820152818360408301376000818301604090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0160101919050565b600082198211156107ca577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b50019056fea164736f6c634300080f000a", -} - -// DelayedVetoableABI is the input ABI used to generate the binding from. -// Deprecated: Use DelayedVetoableMetaData.ABI instead. -var DelayedVetoableABI = DelayedVetoableMetaData.ABI - -// DelayedVetoableBin is the compiled bytecode used for deploying new contracts. -// Deprecated: Use DelayedVetoableMetaData.Bin instead. -var DelayedVetoableBin = DelayedVetoableMetaData.Bin - -// DeployDelayedVetoable deploys a new Ethereum contract, binding an instance of DelayedVetoable to it. 
-func DeployDelayedVetoable(auth *bind.TransactOpts, backend bind.ContractBackend, vetoer_ common.Address, initiator_ common.Address, target_ common.Address, operatingDelay_ *big.Int) (common.Address, *types.Transaction, *DelayedVetoable, error) { - parsed, err := DelayedVetoableMetaData.GetAbi() - if err != nil { - return common.Address{}, nil, nil, err - } - if parsed == nil { - return common.Address{}, nil, nil, errors.New("GetABI returned nil") - } - - address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(DelayedVetoableBin), backend, vetoer_, initiator_, target_, operatingDelay_) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &DelayedVetoable{DelayedVetoableCaller: DelayedVetoableCaller{contract: contract}, DelayedVetoableTransactor: DelayedVetoableTransactor{contract: contract}, DelayedVetoableFilterer: DelayedVetoableFilterer{contract: contract}}, nil -} - -// DelayedVetoable is an auto generated Go binding around an Ethereum contract. -type DelayedVetoable struct { - DelayedVetoableCaller // Read-only binding to the contract - DelayedVetoableTransactor // Write-only binding to the contract - DelayedVetoableFilterer // Log filterer for contract events -} - -// DelayedVetoableCaller is an auto generated read-only Go binding around an Ethereum contract. -type DelayedVetoableCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// DelayedVetoableTransactor is an auto generated write-only Go binding around an Ethereum contract. -type DelayedVetoableTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// DelayedVetoableFilterer is an auto generated log filtering Go binding around an Ethereum contract events. 
-type DelayedVetoableFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// DelayedVetoableSession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. -type DelayedVetoableSession struct { - Contract *DelayedVetoable // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// DelayedVetoableCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type DelayedVetoableCallerSession struct { - Contract *DelayedVetoableCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// DelayedVetoableTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type DelayedVetoableTransactorSession struct { - Contract *DelayedVetoableTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// DelayedVetoableRaw is an auto generated low-level Go binding around an Ethereum contract. -type DelayedVetoableRaw struct { - Contract *DelayedVetoable // Generic contract binding to access the raw methods on -} - -// DelayedVetoableCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type DelayedVetoableCallerRaw struct { - Contract *DelayedVetoableCaller // Generic read-only contract binding to access the raw methods on -} - -// DelayedVetoableTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. 
-type DelayedVetoableTransactorRaw struct { - Contract *DelayedVetoableTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewDelayedVetoable creates a new instance of DelayedVetoable, bound to a specific deployed contract. -func NewDelayedVetoable(address common.Address, backend bind.ContractBackend) (*DelayedVetoable, error) { - contract, err := bindDelayedVetoable(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &DelayedVetoable{DelayedVetoableCaller: DelayedVetoableCaller{contract: contract}, DelayedVetoableTransactor: DelayedVetoableTransactor{contract: contract}, DelayedVetoableFilterer: DelayedVetoableFilterer{contract: contract}}, nil -} - -// NewDelayedVetoableCaller creates a new read-only instance of DelayedVetoable, bound to a specific deployed contract. -func NewDelayedVetoableCaller(address common.Address, caller bind.ContractCaller) (*DelayedVetoableCaller, error) { - contract, err := bindDelayedVetoable(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &DelayedVetoableCaller{contract: contract}, nil -} - -// NewDelayedVetoableTransactor creates a new write-only instance of DelayedVetoable, bound to a specific deployed contract. -func NewDelayedVetoableTransactor(address common.Address, transactor bind.ContractTransactor) (*DelayedVetoableTransactor, error) { - contract, err := bindDelayedVetoable(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &DelayedVetoableTransactor{contract: contract}, nil -} - -// NewDelayedVetoableFilterer creates a new log filterer instance of DelayedVetoable, bound to a specific deployed contract. 
-func NewDelayedVetoableFilterer(address common.Address, filterer bind.ContractFilterer) (*DelayedVetoableFilterer, error) { - contract, err := bindDelayedVetoable(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &DelayedVetoableFilterer{contract: contract}, nil -} - -// bindDelayedVetoable binds a generic wrapper to an already deployed contract. -func bindDelayedVetoable(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(DelayedVetoableABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_DelayedVetoable *DelayedVetoableRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _DelayedVetoable.Contract.DelayedVetoableCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_DelayedVetoable *DelayedVetoableRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.Contract.DelayedVetoableTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_DelayedVetoable *DelayedVetoableRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _DelayedVetoable.Contract.DelayedVetoableTransactor.contract.Transact(opts, method, params...) 
-} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_DelayedVetoable *DelayedVetoableCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _DelayedVetoable.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_DelayedVetoable *DelayedVetoableTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_DelayedVetoable *DelayedVetoableTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _DelayedVetoable.Contract.contract.Transact(opts, method, params...) -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. -// -// Solidity: function version() view returns(string) -func (_DelayedVetoable *DelayedVetoableCaller) Version(opts *bind.CallOpts) (string, error) { - var out []interface{} - err := _DelayedVetoable.contract.Call(opts, &out, "version") - - if err != nil { - return *new(string), err - } - - out0 := *abi.ConvertType(out[0], new(string)).(*string) - - return out0, err - -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. -// -// Solidity: function version() view returns(string) -func (_DelayedVetoable *DelayedVetoableSession) Version() (string, error) { - return _DelayedVetoable.Contract.Version(&_DelayedVetoable.CallOpts) -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. 
-// -// Solidity: function version() view returns(string) -func (_DelayedVetoable *DelayedVetoableCallerSession) Version() (string, error) { - return _DelayedVetoable.Contract.Version(&_DelayedVetoable.CallOpts) -} - -// Delay is a paid mutator transaction binding the contract method 0x6a42b8f8. -// -// Solidity: function delay() returns(uint256 delay_) -func (_DelayedVetoable *DelayedVetoableTransactor) Delay(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.contract.Transact(opts, "delay") -} - -// Delay is a paid mutator transaction binding the contract method 0x6a42b8f8. -// -// Solidity: function delay() returns(uint256 delay_) -func (_DelayedVetoable *DelayedVetoableSession) Delay() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Delay(&_DelayedVetoable.TransactOpts) -} - -// Delay is a paid mutator transaction binding the contract method 0x6a42b8f8. -// -// Solidity: function delay() returns(uint256 delay_) -func (_DelayedVetoable *DelayedVetoableTransactorSession) Delay() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Delay(&_DelayedVetoable.TransactOpts) -} - -// Initiator is a paid mutator transaction binding the contract method 0x5c39fcc1. -// -// Solidity: function initiator() returns(address initiator_) -func (_DelayedVetoable *DelayedVetoableTransactor) Initiator(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.contract.Transact(opts, "initiator") -} - -// Initiator is a paid mutator transaction binding the contract method 0x5c39fcc1. -// -// Solidity: function initiator() returns(address initiator_) -func (_DelayedVetoable *DelayedVetoableSession) Initiator() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Initiator(&_DelayedVetoable.TransactOpts) -} - -// Initiator is a paid mutator transaction binding the contract method 0x5c39fcc1. 
-// -// Solidity: function initiator() returns(address initiator_) -func (_DelayedVetoable *DelayedVetoableTransactorSession) Initiator() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Initiator(&_DelayedVetoable.TransactOpts) -} - -// QueuedAt is a paid mutator transaction binding the contract method 0xb912de5d. -// -// Solidity: function queuedAt(bytes32 callHash) returns(uint256 queuedAt_) -func (_DelayedVetoable *DelayedVetoableTransactor) QueuedAt(opts *bind.TransactOpts, callHash [32]byte) (*types.Transaction, error) { - return _DelayedVetoable.contract.Transact(opts, "queuedAt", callHash) -} - -// QueuedAt is a paid mutator transaction binding the contract method 0xb912de5d. -// -// Solidity: function queuedAt(bytes32 callHash) returns(uint256 queuedAt_) -func (_DelayedVetoable *DelayedVetoableSession) QueuedAt(callHash [32]byte) (*types.Transaction, error) { - return _DelayedVetoable.Contract.QueuedAt(&_DelayedVetoable.TransactOpts, callHash) -} - -// QueuedAt is a paid mutator transaction binding the contract method 0xb912de5d. -// -// Solidity: function queuedAt(bytes32 callHash) returns(uint256 queuedAt_) -func (_DelayedVetoable *DelayedVetoableTransactorSession) QueuedAt(callHash [32]byte) (*types.Transaction, error) { - return _DelayedVetoable.Contract.QueuedAt(&_DelayedVetoable.TransactOpts, callHash) -} - -// Target is a paid mutator transaction binding the contract method 0xd4b83992. -// -// Solidity: function target() returns(address target_) -func (_DelayedVetoable *DelayedVetoableTransactor) Target(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.contract.Transact(opts, "target") -} - -// Target is a paid mutator transaction binding the contract method 0xd4b83992. 
-// -// Solidity: function target() returns(address target_) -func (_DelayedVetoable *DelayedVetoableSession) Target() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Target(&_DelayedVetoable.TransactOpts) -} - -// Target is a paid mutator transaction binding the contract method 0xd4b83992. -// -// Solidity: function target() returns(address target_) -func (_DelayedVetoable *DelayedVetoableTransactorSession) Target() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Target(&_DelayedVetoable.TransactOpts) -} - -// Vetoer is a paid mutator transaction binding the contract method 0xd8bff440. -// -// Solidity: function vetoer() returns(address vetoer_) -func (_DelayedVetoable *DelayedVetoableTransactor) Vetoer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DelayedVetoable.contract.Transact(opts, "vetoer") -} - -// Vetoer is a paid mutator transaction binding the contract method 0xd8bff440. -// -// Solidity: function vetoer() returns(address vetoer_) -func (_DelayedVetoable *DelayedVetoableSession) Vetoer() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Vetoer(&_DelayedVetoable.TransactOpts) -} - -// Vetoer is a paid mutator transaction binding the contract method 0xd8bff440. -// -// Solidity: function vetoer() returns(address vetoer_) -func (_DelayedVetoable *DelayedVetoableTransactorSession) Vetoer() (*types.Transaction, error) { - return _DelayedVetoable.Contract.Vetoer(&_DelayedVetoable.TransactOpts) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. -// -// Solidity: fallback() returns() -func (_DelayedVetoable *DelayedVetoableTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { - return _DelayedVetoable.contract.RawTransact(opts, calldata) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. 
-// -// Solidity: fallback() returns() -func (_DelayedVetoable *DelayedVetoableSession) Fallback(calldata []byte) (*types.Transaction, error) { - return _DelayedVetoable.Contract.Fallback(&_DelayedVetoable.TransactOpts, calldata) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. -// -// Solidity: fallback() returns() -func (_DelayedVetoable *DelayedVetoableTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { - return _DelayedVetoable.Contract.Fallback(&_DelayedVetoable.TransactOpts, calldata) -} - -// DelayedVetoableDelayActivatedIterator is returned from FilterDelayActivated and is used to iterate over the raw logs and unpacked data for DelayActivated events raised by the DelayedVetoable contract. -type DelayedVetoableDelayActivatedIterator struct { - Event *DelayedVetoableDelayActivated // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *DelayedVetoableDelayActivatedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableDelayActivated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableDelayActivated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *DelayedVetoableDelayActivatedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *DelayedVetoableDelayActivatedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// DelayedVetoableDelayActivated represents a DelayActivated event raised by the DelayedVetoable contract. -type DelayedVetoableDelayActivated struct { - Delay *big.Int - Raw types.Log // Blockchain specific contextual infos -} - -// FilterDelayActivated is a free log retrieval operation binding the contract event 0xebf28bfb587e28dfffd9173cf71c32ba5d3f0544a0117b5539c9b274a5bba2a8. 
-// -// Solidity: event DelayActivated(uint256 delay) -func (_DelayedVetoable *DelayedVetoableFilterer) FilterDelayActivated(opts *bind.FilterOpts) (*DelayedVetoableDelayActivatedIterator, error) { - - logs, sub, err := _DelayedVetoable.contract.FilterLogs(opts, "DelayActivated") - if err != nil { - return nil, err - } - return &DelayedVetoableDelayActivatedIterator{contract: _DelayedVetoable.contract, event: "DelayActivated", logs: logs, sub: sub}, nil -} - -// WatchDelayActivated is a free log subscription operation binding the contract event 0xebf28bfb587e28dfffd9173cf71c32ba5d3f0544a0117b5539c9b274a5bba2a8. -// -// Solidity: event DelayActivated(uint256 delay) -func (_DelayedVetoable *DelayedVetoableFilterer) WatchDelayActivated(opts *bind.WatchOpts, sink chan<- *DelayedVetoableDelayActivated) (event.Subscription, error) { - - logs, sub, err := _DelayedVetoable.contract.WatchLogs(opts, "DelayActivated") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(DelayedVetoableDelayActivated) - if err := _DelayedVetoable.contract.UnpackLog(event, "DelayActivated", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseDelayActivated is a log parse operation binding the contract event 0xebf28bfb587e28dfffd9173cf71c32ba5d3f0544a0117b5539c9b274a5bba2a8. 
-// -// Solidity: event DelayActivated(uint256 delay) -func (_DelayedVetoable *DelayedVetoableFilterer) ParseDelayActivated(log types.Log) (*DelayedVetoableDelayActivated, error) { - event := new(DelayedVetoableDelayActivated) - if err := _DelayedVetoable.contract.UnpackLog(event, "DelayActivated", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// DelayedVetoableForwardedIterator is returned from FilterForwarded and is used to iterate over the raw logs and unpacked data for Forwarded events raised by the DelayedVetoable contract. -type DelayedVetoableForwardedIterator struct { - Event *DelayedVetoableForwarded // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *DelayedVetoableForwardedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableForwarded) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableForwarded) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *DelayedVetoableForwardedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *DelayedVetoableForwardedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// DelayedVetoableForwarded represents a Forwarded event raised by the DelayedVetoable contract. -type DelayedVetoableForwarded struct { - CallHash [32]byte - Data []byte - Raw types.Log // Blockchain specific contextual infos -} - -// FilterForwarded is a free log retrieval operation binding the contract event 0x4c109d85bcd0bb5c735b4be850953d652afe4cd9aa2e0b1426a65a4dcb2e1229. 
-// -// Solidity: event Forwarded(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) FilterForwarded(opts *bind.FilterOpts, callHash [][32]byte) (*DelayedVetoableForwardedIterator, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.FilterLogs(opts, "Forwarded", callHashRule) - if err != nil { - return nil, err - } - return &DelayedVetoableForwardedIterator{contract: _DelayedVetoable.contract, event: "Forwarded", logs: logs, sub: sub}, nil -} - -// WatchForwarded is a free log subscription operation binding the contract event 0x4c109d85bcd0bb5c735b4be850953d652afe4cd9aa2e0b1426a65a4dcb2e1229. -// -// Solidity: event Forwarded(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) WatchForwarded(opts *bind.WatchOpts, sink chan<- *DelayedVetoableForwarded, callHash [][32]byte) (event.Subscription, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.WatchLogs(opts, "Forwarded", callHashRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(DelayedVetoableForwarded) - if err := _DelayedVetoable.contract.UnpackLog(event, "Forwarded", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseForwarded is a log parse operation binding the contract event 0x4c109d85bcd0bb5c735b4be850953d652afe4cd9aa2e0b1426a65a4dcb2e1229. 
-// -// Solidity: event Forwarded(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) ParseForwarded(log types.Log) (*DelayedVetoableForwarded, error) { - event := new(DelayedVetoableForwarded) - if err := _DelayedVetoable.contract.UnpackLog(event, "Forwarded", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// DelayedVetoableInitiatedIterator is returned from FilterInitiated and is used to iterate over the raw logs and unpacked data for Initiated events raised by the DelayedVetoable contract. -type DelayedVetoableInitiatedIterator struct { - Event *DelayedVetoableInitiated // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *DelayedVetoableInitiatedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableInitiated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableInitiated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *DelayedVetoableInitiatedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *DelayedVetoableInitiatedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// DelayedVetoableInitiated represents a Initiated event raised by the DelayedVetoable contract. -type DelayedVetoableInitiated struct { - CallHash [32]byte - Data []byte - Raw types.Log // Blockchain specific contextual infos -} - -// FilterInitiated is a free log retrieval operation binding the contract event 0x87a332a414acbc7da074543639ce7ae02ff1ea72e88379da9f261b080beb5a13. 
-// -// Solidity: event Initiated(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) FilterInitiated(opts *bind.FilterOpts, callHash [][32]byte) (*DelayedVetoableInitiatedIterator, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.FilterLogs(opts, "Initiated", callHashRule) - if err != nil { - return nil, err - } - return &DelayedVetoableInitiatedIterator{contract: _DelayedVetoable.contract, event: "Initiated", logs: logs, sub: sub}, nil -} - -// WatchInitiated is a free log subscription operation binding the contract event 0x87a332a414acbc7da074543639ce7ae02ff1ea72e88379da9f261b080beb5a13. -// -// Solidity: event Initiated(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) WatchInitiated(opts *bind.WatchOpts, sink chan<- *DelayedVetoableInitiated, callHash [][32]byte) (event.Subscription, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.WatchLogs(opts, "Initiated", callHashRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(DelayedVetoableInitiated) - if err := _DelayedVetoable.contract.UnpackLog(event, "Initiated", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseInitiated is a log parse operation binding the contract event 0x87a332a414acbc7da074543639ce7ae02ff1ea72e88379da9f261b080beb5a13. 
-// -// Solidity: event Initiated(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) ParseInitiated(log types.Log) (*DelayedVetoableInitiated, error) { - event := new(DelayedVetoableInitiated) - if err := _DelayedVetoable.contract.UnpackLog(event, "Initiated", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// DelayedVetoableVetoedIterator is returned from FilterVetoed and is used to iterate over the raw logs and unpacked data for Vetoed events raised by the DelayedVetoable contract. -type DelayedVetoableVetoedIterator struct { - Event *DelayedVetoableVetoed // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *DelayedVetoableVetoedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableVetoed) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(DelayedVetoableVetoed) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *DelayedVetoableVetoedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *DelayedVetoableVetoedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// DelayedVetoableVetoed represents a Vetoed event raised by the DelayedVetoable contract. -type DelayedVetoableVetoed struct { - CallHash [32]byte - Data []byte - Raw types.Log // Blockchain specific contextual infos -} - -// FilterVetoed is a free log retrieval operation binding the contract event 0xbede6852c1d97d93ff557f676de76670cd0dec861e7fe8beb13aa0ba2b0ab040. 
-// -// Solidity: event Vetoed(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) FilterVetoed(opts *bind.FilterOpts, callHash [][32]byte) (*DelayedVetoableVetoedIterator, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.FilterLogs(opts, "Vetoed", callHashRule) - if err != nil { - return nil, err - } - return &DelayedVetoableVetoedIterator{contract: _DelayedVetoable.contract, event: "Vetoed", logs: logs, sub: sub}, nil -} - -// WatchVetoed is a free log subscription operation binding the contract event 0xbede6852c1d97d93ff557f676de76670cd0dec861e7fe8beb13aa0ba2b0ab040. -// -// Solidity: event Vetoed(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) WatchVetoed(opts *bind.WatchOpts, sink chan<- *DelayedVetoableVetoed, callHash [][32]byte) (event.Subscription, error) { - - var callHashRule []interface{} - for _, callHashItem := range callHash { - callHashRule = append(callHashRule, callHashItem) - } - - logs, sub, err := _DelayedVetoable.contract.WatchLogs(opts, "Vetoed", callHashRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(DelayedVetoableVetoed) - if err := _DelayedVetoable.contract.UnpackLog(event, "Vetoed", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseVetoed is a log parse operation binding the contract event 0xbede6852c1d97d93ff557f676de76670cd0dec861e7fe8beb13aa0ba2b0ab040. 
-// -// Solidity: event Vetoed(bytes32 indexed callHash, bytes data) -func (_DelayedVetoable *DelayedVetoableFilterer) ParseVetoed(log types.Log) (*DelayedVetoableVetoed, error) { - event := new(DelayedVetoableVetoed) - if err := _DelayedVetoable.contract.UnpackLog(event, "Vetoed", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} diff --git a/op-e2e/config/init.go b/op-e2e/config/init.go index 9419c127706..032f70bf2a8 100644 --- a/op-e2e/config/init.go +++ b/op-e2e/config/init.go @@ -201,9 +201,6 @@ func initAllocType(root string, allocType AllocType) { panic(err) } - // Do not use clique in the in memory tests. Otherwise block building - // would be much more complex. - dc.L1UseClique = false // Set the L1 genesis block timestamp to now dc.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix()) dc.FundDevAccounts = true diff --git a/op-e2e/e2eutils/secrets.go b/op-e2e/e2eutils/secrets.go index cd4c91e1e09..9cee2f25c4c 100644 --- a/op-e2e/e2eutils/secrets.go +++ b/op-e2e/e2eutils/secrets.go @@ -19,8 +19,8 @@ const defaultHDPathPrefix = "m/44'/60'/0'/0/" // If these values are changed, it is subject to breaking tests. They // must be in sync with the values in the DeployConfig used to create the system. var DefaultMnemonicConfig = &MnemonicConfig{ - Mnemonic: "test test test test test test test test test test test junk", - CliqueSigner: "m/44'/60'/0'/0/0", + Mnemonic: "test test test test test test test test test test test junk", + // Note: "m/44'/60'/0'/0/0" is a legacy mnemonic path, used for the L1 clique signer. 
Proposer: "m/44'/60'/0'/0/1", Batcher: "m/44'/60'/0'/0/2", Deployer: "m/44'/60'/0'/0/3", @@ -36,9 +36,8 @@ var DefaultMnemonicConfig = &MnemonicConfig{ type MnemonicConfig struct { Mnemonic string - CliqueSigner string - Deployer string - SysCfgOwner string + Deployer string + SysCfgOwner string // rollup actors Proposer string @@ -66,10 +65,6 @@ func (m *MnemonicConfig) Secrets() (*Secrets, error) { if err != nil { return nil, err } - cliqueSigner, err := wallet.PrivateKey(account(m.CliqueSigner)) - if err != nil { - return nil, err - } sysCfgOwner, err := wallet.PrivateKey(account(m.SysCfgOwner)) if err != nil { return nil, err @@ -102,7 +97,6 @@ func (m *MnemonicConfig) Secrets() (*Secrets, error) { return &Secrets{ Deployer: deployer, SysCfgOwner: sysCfgOwner, - CliqueSigner: cliqueSigner, Proposer: proposer, Batcher: batcher, SequencerP2P: sequencerP2P, @@ -115,9 +109,8 @@ func (m *MnemonicConfig) Secrets() (*Secrets, error) { // Secrets bundles secp256k1 private keys for all common rollup actors for testing purposes. type Secrets struct { - Deployer *ecdsa.PrivateKey - CliqueSigner *ecdsa.PrivateKey - SysCfgOwner *ecdsa.PrivateKey + Deployer *ecdsa.PrivateKey + SysCfgOwner *ecdsa.PrivateKey // rollup actors Proposer *ecdsa.PrivateKey @@ -138,7 +131,6 @@ type Secrets struct { func (s *Secrets) Addresses() *Addresses { return &Addresses{ Deployer: crypto.PubkeyToAddress(s.Deployer.PublicKey), - CliqueSigner: crypto.PubkeyToAddress(s.CliqueSigner.PublicKey), SysCfgOwner: crypto.PubkeyToAddress(s.SysCfgOwner.PublicKey), Proposer: crypto.PubkeyToAddress(s.Proposer.PublicKey), Batcher: crypto.PubkeyToAddress(s.Batcher.PublicKey), @@ -151,9 +143,8 @@ func (s *Secrets) Addresses() *Addresses { // Addresses bundles the addresses for all common rollup addresses for testing purposes. 
type Addresses struct { - Deployer common.Address - CliqueSigner common.Address - SysCfgOwner common.Address + Deployer common.Address + SysCfgOwner common.Address // rollup actors Proposer common.Address @@ -169,7 +160,6 @@ type Addresses struct { func (a *Addresses) All() []common.Address { return []common.Address{ a.Deployer, - a.CliqueSigner, a.SysCfgOwner, a.Proposer, a.Batcher, diff --git a/op-e2e/faultproofs/precompile_test.go b/op-e2e/faultproofs/precompile_test.go index 6d116bfa099..2beabfba54d 100644 --- a/op-e2e/faultproofs/precompile_test.go +++ b/op-e2e/faultproofs/precompile_test.go @@ -1,9 +1,12 @@ package faultproofs import ( + "bytes" "context" + "encoding/json" "math" "math/big" + "os/exec" "path/filepath" "testing" @@ -12,7 +15,6 @@ import ( "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" @@ -276,9 +278,29 @@ func runCannon(t *testing.T, ctx context.Context, sys *e2esys.System, inputs uti err := executor.DoGenerateProof(ctx, proofsDir, math.MaxUint, math.MaxUint, extraVmArgs...) 
require.NoError(t, err, "failed to generate proof") - state, err := versions.LoadStateFromFile(vm.FinalStatePath(proofsDir, cfg.Cannon.BinarySnapshots)) - require.NoError(t, err, "failed to parse state") - require.True(t, state.GetExited(), "cannon did not exit") - require.Zero(t, state.GetExitCode(), "cannon failed with exit code %d", state.GetExitCode()) - t.Logf("Completed in %d steps", state.GetStep()) + stdOut, _, err := runCmd(ctx, cfg.Cannon.VmBin, "witness", "--input", vm.FinalStatePath(proofsDir, cfg.Cannon.BinarySnapshots)) + require.NoError(t, err, "failed to run witness cmd") + type stateData struct { + Step uint64 `json:"step"` + ExitCode uint8 `json:"exitCode"` + Exited bool `json:"exited"` + } + var data stateData + err = json.Unmarshal([]byte(stdOut), &data) + require.NoError(t, err, "failed to parse state data") + require.True(t, data.Exited, "cannon did not exit") + require.Zero(t, data.ExitCode, "cannon failed with exit code %d", data.ExitCode) + t.Logf("Completed in %d steps", data.Step) +} + +func runCmd(ctx context.Context, binary string, args ...string) (stdOut string, stdErr string, err error) { + var outBuf bytes.Buffer + var errBuf bytes.Buffer + cmd := exec.CommandContext(ctx, binary, args...) 
+ cmd.Stdout = &outBuf + cmd.Stderr = &errBuf + err = cmd.Run() + stdOut = outBuf.String() + stdErr = errBuf.String() + return } diff --git a/op-node/metrics/metrics.go b/op-node/metrics/metrics.go index 88d8c4d0caa..6e1b664eca2 100644 --- a/op-node/metrics/metrics.go +++ b/op-node/metrics/metrics.go @@ -35,6 +35,7 @@ type Metricer interface { RecordRPCClientRequest(method string) func(err error) RecordRPCClientResponse(method string, err error) SetDerivationIdle(status bool) + SetSequencerState(active bool) RecordPipelineReset() RecordSequencingError() RecordPublishingError() @@ -48,7 +49,7 @@ type Metricer interface { RecordL2Ref(name string, ref eth.L2BlockRef) RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) RecordDerivedBatches(batchType string) - CountSequencedTxs(count int) + CountSequencedTxsInBlock(txns int, deposits int) RecordL1ReorgDepth(d uint64) RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) RecordSequencerReset() @@ -94,6 +95,7 @@ type Metrics struct { DerivationErrors *metrics.Event SequencingErrors *metrics.Event PublishingErrors *metrics.Event + SequencerActive prometheus.Gauge EmittedEvents *prometheus.CounterVec ProcessedEvents *prometheus.CounterVec @@ -133,7 +135,7 @@ type Metrics struct { L1ReorgDepth prometheus.Histogram - TransactionsSequencedTotal prometheus.Counter + TransactionsSequencedTotal *prometheus.CounterVec AltDAMetrics altda.Metricer @@ -209,6 +211,11 @@ func NewMetrics(procName string) *Metrics { DerivationErrors: metrics.NewEvent(factory, ns, "", "derivation_errors", "derivation errors"), SequencingErrors: metrics.NewEvent(factory, ns, "", "sequencing_errors", "sequencing errors"), PublishingErrors: metrics.NewEvent(factory, ns, "", "publishing_errors", "p2p publishing errors"), + SequencerActive: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: ns, + Name: "sequencer_active", + Help: "1 if sequencer active, 0 otherwise", + }), EmittedEvents: factory.NewCounterVec( 
prometheus.CounterOpts{ @@ -261,12 +268,11 @@ func NewMetrics(procName string) *Metrics { Help: "Histogram of L1 Reorg Depths", }), - TransactionsSequencedTotal: factory.NewGauge(prometheus.GaugeOpts{ + TransactionsSequencedTotal: factory.NewCounterVec(prometheus.CounterOpts{ Namespace: ns, Name: "transactions_sequenced_total", Help: "Count of total transactions sequenced", - }), - + }, []string{"type"}), PeerCount: factory.NewGauge(prometheus.GaugeOpts{ Namespace: ns, Subsystem: "p2p", @@ -470,6 +476,14 @@ func (m *Metrics) SetDerivationIdle(status bool) { m.DerivationIdle.Set(val) } +func (m *Metrics) SetSequencerState(active bool) { + var val float64 + if active { + val = 1 + } + m.SequencerActive.Set(val) +} + func (m *Metrics) RecordPipelineReset() { m.PipelineResets.Record() } @@ -516,8 +530,9 @@ func (m *Metrics) RecordDerivedBatches(batchType string) { m.DerivedBatches.Record(batchType) } -func (m *Metrics) CountSequencedTxs(count int) { - m.TransactionsSequencedTotal.Add(float64(count)) +func (m *Metrics) CountSequencedTxsInBlock(txns int, deposits int) { + m.TransactionsSequencedTotal.WithLabelValues("deposits").Add(float64(deposits)) + m.TransactionsSequencedTotal.WithLabelValues("txns").Add(float64(txns - deposits)) } func (m *Metrics) RecordL1ReorgDepth(d uint64) { @@ -686,6 +701,9 @@ func (n *noopMetricer) RecordUp() { func (n *noopMetricer) SetDerivationIdle(status bool) { } +func (m *noopMetricer) SetSequencerState(active bool) { +} + func (n *noopMetricer) RecordPipelineReset() { } @@ -725,7 +743,7 @@ func (n *noopMetricer) RecordUnsafePayloadsBuffer(length uint64, memSize uint64, func (n *noopMetricer) RecordDerivedBatches(batchType string) { } -func (n *noopMetricer) CountSequencedTxs(count int) { +func (n *noopMetricer) CountSequencedTxsInBlock(txns int, deposits int) { } func (n *noopMetricer) RecordL1ReorgDepth(d uint64) { diff --git a/op-node/node/config_persistence.go b/op-node/node/config_persistence.go index 7a30c11b9c9..3f2b8b47537 100644 
--- a/op-node/node/config_persistence.go +++ b/op-node/node/config_persistence.go @@ -55,6 +55,7 @@ func (p *ActiveConfigPersistence) SequencerStopped() error { func (p *ActiveConfigPersistence) persist(sequencerStarted bool) error { p.lock.Lock() defer p.lock.Unlock() + data, err := json.Marshal(persistedState{SequencerStarted: &sequencerStarted}) if err != nil { return fmt.Errorf("marshall new config: %w", err) diff --git a/op-node/p2p/config.go b/op-node/p2p/config.go index ee21ba20fc3..10a75881b87 100644 --- a/op-node/p2p/config.go +++ b/op-node/p2p/config.go @@ -29,7 +29,6 @@ var DefaultBootnodes = []*enode.Node{ // OP Labs enode.MustParse("enode://869d07b5932f17e8490990f75a3f94195e9504ddb6b85f7189e5a9c0a8fff8b00aecf6f3ac450ecba6cdabdb5858788a94bde2b613e0f2d82e9b395355f76d1a@34.65.67.101:30305"), enode.MustParse("enode://2d4e7e9d48f4dd4efe9342706dd1b0024681bd4c3300d021f86fc75eab7865d4e0cbec6fbc883f011cfd6a57423e7e2f6e104baad2b744c3cafaec6bc7dc92c1@34.65.43.171:30305"), - enode.MustParse("enode://9d7a3efefe442351217e73b3a593bcb8efffb55b4807699972145324eab5e6b382152f8d24f6301baebbfb5ecd4127bd3faab2842c04cd432bdf50ba092f6645@34.65.109.126:30305"), // Base enode.MustParse("enr:-J24QNz9lbrKbN4iSmmjtnr7SjUMk4zB7f1krHZcTZx-JRKZd0kA2gjufUROD6T3sOWDVDnFJRvqBBo62zuF-hYCohOGAYiOoEyEgmlkgnY0gmlwhAPniryHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQKNVFlCxh_B-716tTs-h1vMzZkSs1FTu_OYTNjgufplG4N0Y3CCJAaDdWRwgiQG"), enode.MustParse("enr:-J24QH-f1wt99sfpHy4c0QJM-NfmsIfmlLAMMcgZCUEgKG_BBYFc6FwYgaMJMQN5dsRBJApIok0jFn-9CS842lGpLmqGAYiOoDRAgmlkgnY0gmlwhLhIgb2Hb3BzdGFja4OFQgCJc2VjcDI1NmsxoQJ9FTIv8B9myn1MWaC_2lJ-sMoeCDkusCsk4BYHjjCq04N0Y3CCJAaDdWRwgiQG"), @@ -37,7 +36,9 @@ var DefaultBootnodes = []*enode.Node{ enode.MustParse("enr:-J24QHmGyBwUZXIcsGYMaUqGGSl4CFdx9Tozu-vQCn5bHIQbR7On7dZbU61vYvfrJr30t0iahSqhc64J46MnUO2JvQaGAYiOoCKKgmlkgnY0gmlwhAPnCzSHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQINc4fSijfbNIiGhcgvwjsjxVFJHUstK9L1T8OTKUjgloN0Y3CCJAaDdWRwgiQG"), 
enode.MustParse("enr:-J24QG3ypT4xSu0gjb5PABCmVxZqBjVw9ca7pvsI8jl4KATYAnxBmfkaIuEqy9sKvDHKuNCsy57WwK9wTt2aQgcaDDyGAYiOoGAXgmlkgnY0gmlwhDbGmZaHb3BzdGFja4OFQgCJc2VjcDI1NmsxoQIeAK_--tcLEiu7HvoUlbV52MspE0uCocsx1f_rYvRenIN0Y3CCJAaDdWRwgiQG"), // Conduit - enode.MustParse("enode://9d7a3efefe442351217e73b3a593bcb8efffb55b4807699972145324eab5e6b382152f8d24f6301baebbfb5ecd4127bd3faab2842c04cd432bdf50ba092f6645@34.65.109.126:30305"), + enode.MustParse("enode://d25ce99435982b04d60c4b41ba256b84b888626db7bee45a9419382300fbe907359ae5ef250346785bff8d3b9d07cd3e017a27e2ee3cfda3bcbb0ba762ac9674@bootnode.conduit.xyz:0?discport=30301"), + enode.MustParse("enode://2d4e7e9d48f4dd4efe9342706dd1b0024681bd4c3300d021f86fc75eab7865d4e0cbec6fbc883f011cfd6a57423e7e2f6e104baad2b744c3cafaec6bc7dc92c1@34.65.43.171:0?discport=30305"), + enode.MustParse("enode://9d7a3efefe442351217e73b3a593bcb8efffb55b4807699972145324eab5e6b382152f8d24f6301baebbfb5ecd4127bd3faab2842c04cd432bdf50ba092f6645@34.65.109.126:0?discport=30305"), } type HostMetrics interface { diff --git a/op-node/rollup/driver/driver.go b/op-node/rollup/driver/driver.go index 1fd751846cf..01a05fe2b53 100644 --- a/op-node/rollup/driver/driver.go +++ b/op-node/rollup/driver/driver.go @@ -49,6 +49,7 @@ type Metrics interface { RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) SetDerivationIdle(idle bool) + SetSequencerState(active bool) RecordL1ReorgDepth(d uint64) diff --git a/op-node/rollup/engine/build_seal.go b/op-node/rollup/engine/build_seal.go index b292681e13f..25c1d95b8e7 100644 --- a/op-node/rollup/engine/build_seal.go +++ b/op-node/rollup/engine/build_seal.go @@ -110,10 +110,11 @@ func (eq *EngDeriver) onBuildSeal(ev BuildSealEvent) { eq.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(eq.cfg.BlockTime)*time.Second) txnCount := len(envelope.ExecutionPayload.Transactions) - eq.metrics.CountSequencedTxs(txnCount) + depositCount, _ := lastDeposit(envelope.ExecutionPayload.Transactions) + 
eq.metrics.CountSequencedTxsInBlock(txnCount, depositCount) eq.log.Debug("Processed new L2 block", "l2_unsafe", ref, "l1_origin", ref.L1Origin, - "txs", txnCount, "time", ref.Time, "seal_time", sealTime, "build_time", buildTime) + "txs", txnCount, "deposits", depositCount, "time", ref.Time, "seal_time", sealTime, "build_time", buildTime) eq.emitter.Emit(BuildSealedEvent{ Concluding: ev.Concluding, diff --git a/op-node/rollup/engine/events.go b/op-node/rollup/engine/events.go index bb449956487..b6ba85cbcbe 100644 --- a/op-node/rollup/engine/events.go +++ b/op-node/rollup/engine/events.go @@ -15,7 +15,7 @@ import ( ) type Metrics interface { - CountSequencedTxs(count int) + CountSequencedTxsInBlock(txns int, deposits int) RecordSequencerBuildingDiffTime(duration time.Duration) RecordSequencerSealingTime(duration time.Duration) diff --git a/op-node/rollup/interop/interop.go b/op-node/rollup/interop/interop.go index a4342b6a19f..94fa77a5b30 100644 --- a/op-node/rollup/interop/interop.go +++ b/op-node/rollup/interop/interop.go @@ -3,6 +3,7 @@ package interop import ( "context" "fmt" + "strings" "sync" "time" @@ -139,7 +140,10 @@ func (d *InteropDeriver) onInteropPendingSafeChangedEvent(x engine.InteropPendin defer cancel() if err := d.backend.UpdateLocalSafe(ctx, d.chainID, x.DerivedFrom, x.Ref.BlockRef()); err != nil { d.log.Debug("Failed to signal derived-from update to interop backend", "derivedFrom", x.DerivedFrom, "block", x.Ref) - // still continue to try and do a cross-safe update + if strings.Contains(err.Error(), "too far behind") { + d.log.Error("Supervisor is too far behind, resetting derivation", "err", err) + d.emitter.Emit(rollup.ResetEvent{Err: fmt.Errorf("supervisor is too far behind: %w", err)}) + } } // Now that the op-supervisor is aware of the new local-safe block, we want to check if cross-safe changed. 
d.emitter.Emit(engine.RequestCrossSafeEvent{}) diff --git a/op-node/rollup/sequencing/sequencer.go b/op-node/rollup/sequencing/sequencer.go index b6605d601fa..e8b7033273e 100644 --- a/op-node/rollup/sequencing/sequencer.go +++ b/op-node/rollup/sequencing/sequencer.go @@ -33,6 +33,7 @@ type L1OriginSelectorIface interface { } type Metrics interface { + SetSequencerState(active bool) RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) RecordSequencerReset() RecordSequencingError() @@ -619,6 +620,7 @@ func (d *Sequencer) Init(ctx context.Context, active bool) error { if active { return d.forceStart() } else { + d.metrics.SetSequencerState(false) if err := d.listener.SequencerStopped(); err != nil { return fmt.Errorf("failed to notify sequencer-state listener of initial stopped state: %w", err) } @@ -652,6 +654,7 @@ func (d *Sequencer) forceStart() error { d.nextActionOK = true d.nextAction = d.timeNow() d.active.Store(true) + d.metrics.SetSequencerState(true) d.log.Info("Sequencer has been started", "next action", d.nextAction) return nil } @@ -697,6 +700,7 @@ func (d *Sequencer) Stop(ctx context.Context) (common.Hash, error) { d.nextActionOK = false d.active.Store(false) + d.metrics.SetSequencerState(false) d.log.Info("Sequencer has been stopped") return d.latestHead.Hash, nil } diff --git a/op-node/version/version.go b/op-node/version/version.go index 327ee7b4972..2456f656d45 100644 --- a/op-node/version/version.go +++ b/op-node/version/version.go @@ -1,6 +1,6 @@ package version var ( - Version = "v0.10.14" + Version = "v0.0.0" Meta = "dev" ) diff --git a/op-program/README.md b/op-program/README.md index 15e89ffb5cf..932ec85db22 100644 --- a/op-program/README.md +++ b/op-program/README.md @@ -45,6 +45,7 @@ After running `make reproducible-prestate`, the following files can be found in [./bin/](./bin/): - [`op-program`](./bin/op-program) - [`op-program-client.elf`](./bin/op-program-client.elf) +- 
[`op-program-client64.elf`](./bin/op-program-client64.elf) - [`prestate.bin.gz`](./bin/prestate.bin.gz) - [`prestate-proof.json`](./bin/prestate-proof.json) diff --git a/op-program/host/version/version.go b/op-program/host/version/version.go index 327ee7b4972..2456f656d45 100644 --- a/op-program/host/version/version.go +++ b/op-program/host/version/version.go @@ -1,6 +1,6 @@ package version var ( - Version = "v0.10.14" + Version = "v0.0.0" Meta = "dev" ) diff --git a/op-proposer/cmd/main.go b/op-proposer/cmd/main.go index cbb21fb2851..e5096351fdd 100644 --- a/op-proposer/cmd/main.go +++ b/op-proposer/cmd/main.go @@ -19,7 +19,7 @@ import ( ) var ( - Version = "v0.10.14" + Version = "v0.0.0" GitCommit = "" GitDate = "" ) diff --git a/op-service/eth/sync_status.go b/op-service/eth/sync_status.go index f9db1f672b8..e16275920e2 100644 --- a/op-service/eth/sync_status.go +++ b/op-service/eth/sync_status.go @@ -5,7 +5,7 @@ package eth type SyncStatus struct { // CurrentL1 is the L1 block that the derivation process is last idled at. // This may not be fully derived into L2 data yet. - // The safe L2 blocks were produced/included fully from the L1 chain up to and including this L1 block. + // The safe L2 blocks were produced/included fully from the L1 chain up to _but excluding_ this L1 block. // If the node is synced, this matches the HeadL1, minus the verifier confirmation distance. CurrentL1 L1BlockRef `json:"current_l1"` // CurrentL1Finalized is a legacy sync-status attribute. This is deprecated. 
diff --git a/op-service/testutils/anvil/anvil.go b/op-service/testutils/anvil/anvil.go index 7419f9da625..50590a096a7 100644 --- a/op-service/testutils/anvil/anvil.go +++ b/op-service/testutils/anvil/anvil.go @@ -38,6 +38,8 @@ func New(l1RPCURL string, logger log.Logger) (*Runner, error) { "--fork-url", l1RPCURL, "--port", "0", + "--base-fee", + "1000000000", ) stdout, err := proc.StdoutPipe() if err != nil { diff --git a/op-service/testutils/metrics.go b/op-service/testutils/metrics.go index 421d32f2109..25edee14a06 100644 --- a/op-service/testutils/metrics.go +++ b/op-service/testutils/metrics.go @@ -17,7 +17,7 @@ type TestDerivationMetrics struct { FnRecordChannelTimedOut func() } -func (t *TestDerivationMetrics) CountSequencedTxs(count int) { +func (t *TestDerivationMetrics) CountSequencedTxsInBlock(txns int, deposits int) { } func (t *TestDerivationMetrics) RecordSequencerBuildingDiffTime(duration time.Duration) { diff --git a/op-supervisor/cmd/main.go b/op-supervisor/cmd/main.go index 8e306bf9009..5aec4e927d0 100644 --- a/op-supervisor/cmd/main.go +++ b/op-supervisor/cmd/main.go @@ -20,7 +20,7 @@ import ( ) var ( - Version = "v0.0.1" + Version = "v0.0.0" GitCommit = "" GitDate = "" ) diff --git a/op-supervisor/supervisor/backend/db/fromda/update.go b/op-supervisor/supervisor/backend/db/fromda/update.go index 146e558cf26..957df9e2dfa 100644 --- a/op-supervisor/supervisor/backend/db/fromda/update.go +++ b/op-supervisor/supervisor/backend/db/fromda/update.go @@ -67,8 +67,10 @@ func (db *DB) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error { derived, derived.ParentHash, lastDerived, types.ErrConflict) } } else if lastDerived.Number+1 < derived.Number { - return fmt.Errorf("derived block %s (parent: %s) is too new, expected to build on top of %s: %w", - derived, derived.ParentHash, lastDerived, types.ErrOutOfOrder) + return fmt.Errorf("cannot add block (%s derived from %s), last block (%s derived from %s) is too far behind: (%w)", + derived, 
derivedFrom, + lastDerived, lastDerivedFrom, + types.ErrOutOfOrder) } else { return fmt.Errorf("derived block %s is older than current derived block %s: %w", derived, lastDerived, types.ErrOutOfOrder) @@ -89,8 +91,10 @@ func (db *DB) AddDerived(derivedFrom eth.BlockRef, derived eth.BlockRef) error { } } else if lastDerivedFrom.Number+1 < derivedFrom.Number { // adding block that is derived from something too far into the future - return fmt.Errorf("cannot add block %s as derived from %s, still deriving from %s: %w", - derived, derivedFrom, lastDerivedFrom, types.ErrOutOfOrder) + return fmt.Errorf("cannot add block (%s derived from %s), last block (%s derived from %s) is too far behind: (%w)", + derived, derivedFrom, + lastDerived, lastDerivedFrom, + types.ErrOutOfOrder) } else { // adding block that is derived from something too old return fmt.Errorf("cannot add block %s as derived from %s, deriving already at %s: %w", diff --git a/op-ufm/README.md b/op-ufm/README.md deleted file mode 100644 index 08d1b4a6eca..00000000000 --- a/op-ufm/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# ⚠️ Important -This project has been moved to [ethereum-optimism/infra](https://github.com/ethereum-optimism/infra) - -# OP User Facing Monitoring - -This project simulates a synthetic user interacting with a OP Stack chain. - -It is intended to be used as a tool for monitoring -the health of the network by measuring end-to-end transaction latency. - - -## Metrics - -* Round-trip duration time to get transaction receipt (from creation timestamp) - -* First-seen duration time (from creation timestamp) - - -## Usage - -Run `make ufm` to build the binary. No additional dependencies are necessary. - -Copy `example.config.toml` to `config.toml` and edit the file to configure the service. - -Start the service with `ufm config.toml`. 
- diff --git a/op-wheel/cheat/cheat.go b/op-wheel/cheat/cheat.go index 4b2d428e102..1b5089c2836 100644 --- a/op-wheel/cheat/cheat.go +++ b/op-wheel/cheat/cheat.go @@ -138,7 +138,7 @@ func (ch *Cheater) RunAndClose(fn HeadFn) error { // Geth stores the TD for each block separately from the block itself. We must update this // manually, otherwise Geth thinks we haven't reached TTD yet and tries to build a block - // using Clique consensus, which causes a panic. + // using pre-merge consensus, which causes a panic. rawdb.WriteTd(batch, blockHash, preID.Number, ch.Blockchain.GetTd(preID.Hash, preID.Number)) // Need to copy over receipts since they are keyed by block hash. diff --git a/ops-bedrock/l1-geth.Dockerfile b/ops-bedrock/l1-geth.Dockerfile index c84a5debf72..50262ec94ea 100644 --- a/ops-bedrock/l1-geth.Dockerfile +++ b/ops-bedrock/l1-geth.Dockerfile @@ -1,4 +1,4 @@ -FROM ethereum/client-go:v1.14.11 +FROM ethereum/client-go:v1.14.12 RUN apk add --no-cache jq bash diff --git a/ops-bedrock/l2-op-geth-interop.Dockerfile b/ops-bedrock/l2-op-geth-interop.Dockerfile index 41a667c0fc2..5021fede46d 100644 --- a/ops-bedrock/l2-op-geth-interop.Dockerfile +++ b/ops-bedrock/l2-op-geth-interop.Dockerfile @@ -1,4 +1,4 @@ -FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101411.1-rc.3 +FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101411.2-rc.1 # Note: depend on dev-release for sequencer interop message checks RUN apk add --no-cache jq diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index e7e183804ab..da0fc156f02 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -12,7 +12,12 @@ ARG TARGET_BASE_IMAGE=alpine:3.20 # We may be cross-building for another platform. Specify which platform we need as builder. 
FROM --platform=$BUILDPLATFORM golang:1.22.7-alpine3.20 AS builder -RUN apk add --no-cache make gcc musl-dev linux-headers git jq bash +RUN apk add --no-cache curl tar gzip make gcc musl-dev linux-headers git jq bash + +# install versioned toolchain +COPY ./versions.json . +RUN curl -L https://github.com/casey/just/releases/download/$(jq -r .just < versions.json)/just-$(jq -r .just < versions.json)-x86_64-unknown-linux-musl.tar.gz | \ + tar xz -C /usr/local/bin just # We copy the go.mod/sum first, so the `go mod download` does not have to re-run if dependencies do not change. COPY ./go.mod /app/go.mod diff --git a/ops/docker/op-stack-go/Dockerfile.dockerignore b/ops/docker/op-stack-go/Dockerfile.dockerignore index bd700e291e4..edf3bbc1c51 100644 --- a/ops/docker/op-stack-go/Dockerfile.dockerignore +++ b/ops/docker/op-stack-go/Dockerfile.dockerignore @@ -20,3 +20,5 @@ !/op-alt-da !/go.mod !/go.sum +!/just +!/versions.json diff --git a/packages/contracts-bedrock/deploy-config/sepolia-devnet-0.json b/packages/contracts-bedrock/deploy-config/sepolia-devnet-0.json index 4239d10c3d0..8cc17a8f668 100644 --- a/packages/contracts-bedrock/deploy-config/sepolia-devnet-0.json +++ b/packages/contracts-bedrock/deploy-config/sepolia-devnet-0.json @@ -15,8 +15,6 @@ "l2OutputOracleStartingBlockNumber": 0, "l2OutputOracleProposer": "0x95014c45078354ff839f14192228108eac82e00a", "l2OutputOracleChallenger": "0x8c20c40180751d93e939dddee3517ae0d1ebead2", - "cliqueSignerAddress": "0x0000000000000000000000000000000000000000", - "l1UseClique": false, "l1BlockTime": 12, "l1GenesisBlockTimestamp": "0x0", "l1GenesisBlockNonce": "0x0", diff --git a/packages/contracts-bedrock/meta/STYLE_GUIDE.md b/packages/contracts-bedrock/meta/STYLE_GUIDE.md index 1d8b84818b9..00af13d5a5b 100644 --- a/packages/contracts-bedrock/meta/STYLE_GUIDE.md +++ b/packages/contracts-bedrock/meta/STYLE_GUIDE.md @@ -96,8 +96,8 @@ Spacers MUST be `private`. 
All contracts should be assumed to live behind proxies (except in certain special circumstances). This means that new contracts MUST be built under the assumption of upgradeability. -We use a minimal [`Proxy`](./src/universal/Proxy.sol) contract designed to be owned by a -corresponding [`ProxyAdmin`](./src/universal/ProxyAdmin.sol) which follow the interfaces +We use a minimal [`Proxy`](../src/universal/Proxy.sol) contract designed to be owned by a +corresponding [`ProxyAdmin`](../src/universal/ProxyAdmin.sol) which follow the interfaces of OpenZeppelin's `Proxy` and `ProxyAdmin` contracts, respectively. Unless explicitly discussed otherwise, you MUST include the following basic upgradeability diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 54cc1a23e17..6dc6a8bb90b 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -162,7 +162,7 @@ contract Deploy is Deployer { L1ERC721Bridge: getAddress("L1ERC721BridgeProxy"), ProtocolVersions: getAddress("ProtocolVersionsProxy"), SuperchainConfig: getAddress("SuperchainConfigProxy"), - OPContractsManager: getAddress("OPContractsManagerProxy") + OPContractsManager: getAddress("OPContractsManager") }); } @@ -378,13 +378,13 @@ contract Deploy is Deployer { dii.set(dii.disputeGameFinalityDelaySeconds.selector, cfg.disputeGameFinalityDelaySeconds()); dii.set(dii.mipsVersion.selector, Config.useMultithreadedCannon() ? 
2 : 1); string memory release = "dev"; - dii.set(dii.release.selector, release); + dii.set(dii.l1ContractsRelease.selector, release); dii.set( dii.standardVersionsToml.selector, string.concat(vm.projectRoot(), "/test/fixtures/standard-versions.toml") ); dii.set(dii.superchainConfigProxy.selector, mustGetAddress("SuperchainConfigProxy")); dii.set(dii.protocolVersionsProxy.selector, mustGetAddress("ProtocolVersionsProxy")); - dii.set(dii.opcmProxyOwner.selector, cfg.finalSystemOwner()); + dii.set(dii.salt.selector, _implSalt()); if (_isInterop) { di = DeployImplementations(new DeployImplementationsInterop()); @@ -409,8 +409,7 @@ contract Deploy is Deployer { save("DelayedWETH", address(dio.delayedWETHImpl())); save("PreimageOracle", address(dio.preimageOracleSingleton())); save("Mips", address(dio.mipsSingleton())); - save("OPContractsManagerProxy", address(dio.opcmProxy())); - save("OPContractsManager", address(dio.opcmImpl())); + save("OPContractsManager", address(dio.opcm())); Types.ContractSet memory contracts = _impls(); ChainAssertions.checkL1CrossDomainMessenger({ _contracts: contracts, _vm: vm, _isProxy: false }); @@ -446,7 +445,7 @@ contract Deploy is Deployer { // Ensure that the requisite contracts are deployed address superchainConfigProxy = mustGetAddress("SuperchainConfigProxy"); - OPContractsManager opcm = OPContractsManager(mustGetAddress("OPContractsManagerProxy")); + OPContractsManager opcm = OPContractsManager(mustGetAddress("OPContractsManager")); OPContractsManager.DeployInput memory deployInput = getDeployInput(); OPContractsManager.DeployOutput memory deployOutput = opcm.deploy(deployInput); @@ -697,12 +696,12 @@ contract Deploy is Deployer { addr_ = address(oracle); } - /// @notice Deploy Mips VM. Deploys either MIPS or MIPS2 depending on the environment + /// @notice Deploy Mips VM. 
Deploys either MIPS or MIPS64 depending on the environment function deployMips() public broadcast returns (address addr_) { addr_ = DeployUtils.create2AndSave({ _save: this, _salt: _implSalt(), - _name: Config.useMultithreadedCannon() ? "MIPS2" : "MIPS", + _name: Config.useMultithreadedCannon() ? "MIPS64" : "MIPS", _args: DeployUtils.encodeConstructor( abi.encodeCall(IMIPS2.__constructor__, (IPreimageOracle(mustGetAddress("PreimageOracle")))) ) @@ -1021,7 +1020,7 @@ contract Deploy is Deployer { mipsAbsolutePrestate_ = Claim.wrap(abi.decode(bytes(Process.bash(string.concat("cat ", filePath, " | jq -r .pre"))), (bytes32))); console.log( - "[MT-Cannon Dispute Game] Using devnet MIPS2 Absolute prestate: %s", + "[MT-Cannon Dispute Game] Using devnet MIPS64 Absolute prestate: %s", vm.toString(Claim.unwrap(mipsAbsolutePrestate_)) ); } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol index 51b60c6c299..c128260a21d 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployDisputeGame.s.sol @@ -376,7 +376,7 @@ contract DeployDisputeGame is Script { vm.broadcast(msg.sender); singleton = IMIPS( DeployUtils.create1({ - _name: mipsVersion == 1 ? "MIPS" : "MIPS2", + _name: mipsVersion == 1 ? 
"MIPS" : "MIPS64", _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS.__constructor__, (preimageOracle))) }) ); diff --git a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol index c9048a07dfa..71ba435df4c 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol @@ -8,16 +8,11 @@ import { LibString } from "@solady/utils/LibString.sol"; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; -import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; -import { IL1CrossDomainMessengerV160 } from "src/L1/interfaces/IL1CrossDomainMessengerV160.sol"; -import { IL1StandardBridgeV160 } from "src/L1/interfaces/IL1StandardBridgeV160.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Bytes } from "src/libraries/Bytes.sol"; -import { IProxy } from "src/universal/interfaces/IProxy.sol"; - import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; import { IMIPS } from "src/cannon/interfaces/IMIPS.sol"; @@ -51,8 +46,9 @@ contract DeployImplementationsInput is BaseDeployIO { uint256 internal _disputeGameFinalityDelaySeconds; uint256 internal _mipsVersion; - // The release version to set OPCM implementations for, of the format `op-contracts/vX.Y.Z`. - string internal _release; + // This is used in opcm to signal which version of the L1 smart contracts is deployed. + // It takes the format of `op-contracts/v*.*.*`. + string internal _l1ContractsRelease; // Outputs from DeploySuperchain.s.sol. 
ISuperchainConfig internal _superchainConfigProxy; @@ -60,8 +56,6 @@ contract DeployImplementationsInput is BaseDeployIO { string internal _standardVersionsToml; - address internal _opcmProxyOwner; - function set(bytes4 _sel, uint256 _value) public { require(_value != 0, "DeployImplementationsInput: cannot set zero value"); @@ -85,7 +79,7 @@ contract DeployImplementationsInput is BaseDeployIO { function set(bytes4 _sel, string memory _value) public { require(!LibString.eq(_value, ""), "DeployImplementationsInput: cannot set empty string"); - if (_sel == this.release.selector) _release = _value; + if (_sel == this.l1ContractsRelease.selector) _l1ContractsRelease = _value; else if (_sel == this.standardVersionsToml.selector) _standardVersionsToml = _value; else revert("DeployImplementationsInput: unknown selector"); } @@ -94,7 +88,6 @@ contract DeployImplementationsInput is BaseDeployIO { require(_addr != address(0), "DeployImplementationsInput: cannot set zero address"); if (_sel == this.superchainConfigProxy.selector) _superchainConfigProxy = ISuperchainConfig(_addr); else if (_sel == this.protocolVersionsProxy.selector) _protocolVersionsProxy = IProtocolVersions(_addr); - else if (_sel == this.opcmProxyOwner.selector) _opcmProxyOwner = _addr; else revert("DeployImplementationsInput: unknown selector"); } @@ -141,9 +134,9 @@ contract DeployImplementationsInput is BaseDeployIO { return _mipsVersion; } - function release() public view returns (string memory) { - require(!LibString.eq(_release, ""), "DeployImplementationsInput: not set"); - return _release; + function l1ContractsRelease() public view returns (string memory) { + require(!LibString.eq(_l1ContractsRelease, ""), "DeployImplementationsInput: not set"); + return _l1ContractsRelease; } function standardVersionsToml() public view returns (string memory) { @@ -160,16 +153,10 @@ contract DeployImplementationsInput is BaseDeployIO { require(address(_protocolVersionsProxy) != address(0), 
"DeployImplementationsInput: not set"); return _protocolVersionsProxy; } - - function opcmProxyOwner() public view returns (address) { - require(address(_opcmProxyOwner) != address(0), "DeployImplementationsInput: not set"); - return _opcmProxyOwner; - } } contract DeployImplementationsOutput is BaseDeployIO { - OPContractsManager internal _opcmProxy; - OPContractsManager internal _opcmImpl; + OPContractsManager internal _opcm; IDelayedWETH internal _delayedWETHImpl; IOptimismPortal2 internal _optimismPortalImpl; IPreimageOracle internal _preimageOracleSingleton; @@ -185,8 +172,7 @@ contract DeployImplementationsOutput is BaseDeployIO { require(_addr != address(0), "DeployImplementationsOutput: cannot set zero address"); // forgefmt: disable-start - if (_sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(payable(_addr)); - else if (_sel == this.opcmImpl.selector) _opcmImpl = OPContractsManager(payable(_addr)); + if (_sel == this.opcm.selector) _opcm = OPContractsManager(_addr); else if (_sel == this.optimismPortalImpl.selector) _optimismPortalImpl = IOptimismPortal2(payable(_addr)); else if (_sel == this.delayedWETHImpl.selector) _delayedWETHImpl = IDelayedWETH(payable(_addr)); else if (_sel == this.preimageOracleSingleton.selector) _preimageOracleSingleton = IPreimageOracle(_addr); @@ -201,12 +187,11 @@ contract DeployImplementationsOutput is BaseDeployIO { // forgefmt: disable-end } - function checkOutput(DeployImplementationsInput _dii) public { + function checkOutput(DeployImplementationsInput _dii) public view { // With 12 addresses, we'd get a stack too deep error if we tried to do this inline as a // single call to `Solarray.addresses`. So we split it into two calls. 
address[] memory addrs1 = Solarray.addresses( - address(this.opcmProxy()), - address(this.opcmImpl()), + address(this.opcm()), address(this.optimismPortalImpl()), address(this.delayedWETHImpl()), address(this.preimageOracleSingleton()), @@ -227,15 +212,9 @@ contract DeployImplementationsOutput is BaseDeployIO { assertValidDeploy(_dii); } - function opcmProxy() public returns (OPContractsManager) { - DeployUtils.assertValidContractAddress(address(_opcmProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_opcmProxy)); - return _opcmProxy; - } - - function opcmImpl() public view returns (OPContractsManager) { - DeployUtils.assertValidContractAddress(address(_opcmImpl)); - return _opcmImpl; + function opcm() public view returns (OPContractsManager) { + DeployUtils.assertValidContractAddress(address(_opcm)); + return _opcm; } function optimismPortalImpl() public view returns (IOptimismPortal2) { @@ -289,40 +268,22 @@ contract DeployImplementationsOutput is BaseDeployIO { } // -------- Deployment Assertions -------- - function assertValidDeploy(DeployImplementationsInput _dii) public { + function assertValidDeploy(DeployImplementationsInput _dii) public view { assertValidDelayedWETHImpl(_dii); assertValidDisputeGameFactoryImpl(_dii); assertValidL1CrossDomainMessengerImpl(_dii); assertValidL1ERC721BridgeImpl(_dii); assertValidL1StandardBridgeImpl(_dii); assertValidMipsSingleton(_dii); - assertValidOpcmProxy(_dii); - assertValidOpcmImpl(_dii); + assertValidOpcm(_dii); assertValidOptimismMintableERC20FactoryImpl(_dii); assertValidOptimismPortalImpl(_dii); assertValidPreimageOracleSingleton(_dii); assertValidSystemConfigImpl(_dii); } - function assertValidOpcmProxy(DeployImplementationsInput _dii) internal { - // First we check the proxy as itself. - IProxy proxy = IProxy(payable(address(opcmProxy()))); - vm.prank(address(0)); - address admin = proxy.admin(); - require(admin == address(_dii.opcmProxyOwner()), "OPCMP-10"); - - // Then we check the proxy as OPCM. 
- DeployUtils.assertInitialized({ _contractAddress: address(opcmProxy()), _slot: 0, _offset: 0 }); - require(address(opcmProxy().superchainConfig()) == address(_dii.superchainConfigProxy()), "OPCMP-20"); - require(address(opcmProxy().protocolVersions()) == address(_dii.protocolVersionsProxy()), "OPCMP-30"); - require(LibString.eq(opcmProxy().latestRelease(), _dii.release()), "OPCMP-50"); // Initial release is latest. - } - - function assertValidOpcmImpl(DeployImplementationsInput _dii) internal { - IProxy proxy = IProxy(payable(address(opcmProxy()))); - vm.prank(address(0)); - OPContractsManager impl = OPContractsManager(proxy.implementation()); - DeployUtils.assertInitialized({ _contractAddress: address(impl), _slot: 0, _offset: 0 }); + function assertValidOpcm(DeployImplementationsInput _dii) internal view { + OPContractsManager impl = OPContractsManager(address(opcm())); require(address(impl.superchainConfig()) == address(_dii.superchainConfigProxy()), "OPCMI-10"); require(address(impl.protocolVersions()) == address(_dii.protocolVersionsProxy()), "OPCMI-20"); } @@ -361,7 +322,6 @@ contract DeployImplementationsOutput is BaseDeployIO { function assertValidMipsSingleton(DeployImplementationsInput) internal view { IMIPS mips = mipsSingleton(); - require(address(mips.oracle()) == address(preimageOracleSingleton()), "MIPS-10"); } @@ -480,102 +440,38 @@ contract DeployImplementations is Script { // --- OP Contracts Manager --- - function opcmSystemConfigSetter( - DeployImplementationsInput _dii, - DeployImplementationsOutput _dio - ) - internal - view - virtual - returns (OPContractsManager.ImplementationSetter memory) - { - // When configuring OPCM during Solidity tests, we are using the latest SystemConfig.sol - // version in this repo, which contains Custom Gas Token (CGT) features. This CGT version - // has a different `initialize` signature than the SystemConfig version that was released - // as part of `op-contracts/v1.6.0`, which is no longer in the repo. 
When running this - // script's bytecode for a production deploy of OPCM at `op-contracts/v1.6.0`, we need to - // use the ISystemConfigV160 interface instead of ISystemConfig. Therefore the selector used - // is a function of the `release` passed in by the caller. - bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") - ? ISystemConfigV160.initialize.selector - : ISystemConfig.initialize.selector; - return OPContractsManager.ImplementationSetter({ - name: "SystemConfig", - info: OPContractsManager.Implementation(address(_dio.systemConfigImpl()), selector) - }); - } - - function l1CrossDomainMessengerConfigSetter( - DeployImplementationsInput _dii, - DeployImplementationsOutput _dio - ) - internal - view - virtual - returns (OPContractsManager.ImplementationSetter memory) - { - bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") - ? IL1CrossDomainMessengerV160.initialize.selector - : IL1CrossDomainMessenger.initialize.selector; - return OPContractsManager.ImplementationSetter({ - name: "L1CrossDomainMessenger", - info: OPContractsManager.Implementation(address(_dio.l1CrossDomainMessengerImpl()), selector) - }); - } - - function l1StandardBridgeConfigSetter( - DeployImplementationsInput _dii, - DeployImplementationsOutput _dio - ) - internal - view - virtual - returns (OPContractsManager.ImplementationSetter memory) - { - bytes4 selector = LibString.eq(_dii.release(), "op-contracts/v1.6.0") - ? IL1StandardBridgeV160.initialize.selector - : IL1StandardBridge.initialize.selector; - return OPContractsManager.ImplementationSetter({ - name: "L1StandardBridge", - info: OPContractsManager.Implementation(address(_dio.l1StandardBridgeImpl()), selector) - }); - } - - // Deploy and initialize a proxied OPContractsManager. 
function createOPCMContract( DeployImplementationsInput _dii, DeployImplementationsOutput _dio, OPContractsManager.Blueprints memory _blueprints, - string memory _release, - OPContractsManager.ImplementationSetter[] memory _setters + string memory _l1ContractsRelease ) internal virtual - returns (OPContractsManager opcmProxy_) + returns (OPContractsManager opcm_) { - address opcmProxyOwner = _dii.opcmProxyOwner(); + ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); + IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); + + OPContractsManager.Implementations memory implementations = OPContractsManager.Implementations({ + l1ERC721BridgeImpl: address(_dio.l1ERC721BridgeImpl()), + optimismPortalImpl: address(_dio.optimismPortalImpl()), + systemConfigImpl: address(_dio.systemConfigImpl()), + optimismMintableERC20FactoryImpl: address(_dio.optimismMintableERC20FactoryImpl()), + l1CrossDomainMessengerImpl: address(_dio.l1CrossDomainMessengerImpl()), + l1StandardBridgeImpl: address(_dio.l1StandardBridgeImpl()), + disputeGameFactoryImpl: address(_dio.disputeGameFactoryImpl()), + delayedWETHImpl: address(_dio.delayedWETHImpl()), + mipsImpl: address(_dio.mipsSingleton()) + }); vm.broadcast(msg.sender); - IProxy proxy = IProxy( - DeployUtils.create1({ - _name: "Proxy", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (msg.sender))) - }) + opcm_ = new OPContractsManager( + superchainConfigProxy, protocolVersionsProxy, _l1ContractsRelease, _blueprints, implementations ); - deployOPContractsManagerImpl(_dii, _dio); - OPContractsManager opcmImpl = _dio.opcmImpl(); - - OPContractsManager.InitializerInputs memory initializerInputs = - OPContractsManager.InitializerInputs(_blueprints, _setters, _release, true); - - vm.startBroadcast(msg.sender); - proxy.upgradeToAndCall(address(opcmImpl), abi.encodeCall(opcmImpl.initialize, (initializerInputs))); - - proxy.changeAdmin(address(opcmProxyOwner)); // transfer ownership 
of Proxy contract to the ProxyAdmin contract - vm.stopBroadcast(); - - opcmProxy_ = OPContractsManager(address(proxy)); + vm.label(address(opcm_), "OPContractsManager"); + _dio.set(_dio.opcm.selector, address(opcm_)); } function deployOPContractsManager( @@ -585,72 +481,42 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); - - // First we deploy the blueprints for the singletons deployed by OPCM. - // forgefmt: disable-start - bytes32 salt = _dii.salt(); - OPContractsManager.Blueprints memory blueprints; - - vm.startBroadcast(msg.sender); - blueprints.addressManager = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("AddressManager")), salt); - blueprints.proxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("Proxy")), salt); - blueprints.proxyAdmin = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("ProxyAdmin")), salt); - blueprints.l1ChugSplashProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("L1ChugSplashProxy")), salt); - blueprints.resolvedDelegateProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("ResolvedDelegateProxy")), salt); - blueprints.anchorStateRegistry = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("AnchorStateRegistry")), salt); - (blueprints.permissionedDisputeGame1, blueprints.permissionedDisputeGame2) = deployBigBytecode(vm.getCode("PermissionedDisputeGame"), salt); - vm.stopBroadcast(); - // forgefmt: disable-end - - OPContractsManager.ImplementationSetter[] memory setters = new OPContractsManager.ImplementationSetter[](9); - setters[0] = OPContractsManager.ImplementationSetter({ - name: "L1ERC721Bridge", - info: OPContractsManager.Implementation(address(_dio.l1ERC721BridgeImpl()), IL1ERC721Bridge.initialize.selector) - }); - setters[1] = OPContractsManager.ImplementationSetter({ - name: "OptimismPortal", - info: OPContractsManager.Implementation( - address(_dio.optimismPortalImpl()), 
IOptimismPortal2.initialize.selector - ) - }); - setters[2] = opcmSystemConfigSetter(_dii, _dio); - setters[3] = OPContractsManager.ImplementationSetter({ - name: "OptimismMintableERC20Factory", - info: OPContractsManager.Implementation( - address(_dio.optimismMintableERC20FactoryImpl()), IOptimismMintableERC20Factory.initialize.selector - ) - }); - setters[4] = l1CrossDomainMessengerConfigSetter(_dii, _dio); - setters[5] = l1StandardBridgeConfigSetter(_dii, _dio); - setters[6] = OPContractsManager.ImplementationSetter({ - name: "DisputeGameFactory", - info: OPContractsManager.Implementation( - address(_dio.disputeGameFactoryImpl()), IDisputeGameFactory.initialize.selector - ) - }); - setters[7] = OPContractsManager.ImplementationSetter({ - name: "DelayedWETH", - info: OPContractsManager.Implementation(address(_dio.delayedWETHImpl()), IDelayedWETH.initialize.selector) - }); - setters[8] = OPContractsManager.ImplementationSetter({ - name: "MIPS", - // MIPS is a singleton for all chains, so it doesn't need to be initialized, so the - // selector is just `bytes4(0)`. - info: OPContractsManager.Implementation(address(_dio.mipsSingleton()), bytes4(0)) - }); + string memory l1ContractsRelease = _dii.l1ContractsRelease(); + string memory stdVerToml = _dii.standardVersionsToml(); + string memory contractName = "op_contracts_manager"; + OPContractsManager opcm; - // This call contains a broadcast to deploy OPCM which is proxied. - OPContractsManager opcmProxy = createOPCMContract(_dii, _dio, blueprints, release, setters); + address existingImplementation = getReleaseAddress(l1ContractsRelease, contractName, stdVerToml); + if (existingImplementation != address(0)) { + opcm = OPContractsManager(existingImplementation); + } else { + // First we deploy the blueprints for the singletons deployed by OPCM. 
+ // forgefmt: disable-start + bytes32 salt = _dii.salt(); + OPContractsManager.Blueprints memory blueprints; + + vm.startBroadcast(msg.sender); + blueprints.addressManager = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("AddressManager")), salt); + blueprints.proxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("Proxy")), salt); + blueprints.proxyAdmin = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("ProxyAdmin")), salt); + blueprints.l1ChugSplashProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("L1ChugSplashProxy")), salt); + blueprints.resolvedDelegateProxy = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("ResolvedDelegateProxy")), salt); + blueprints.anchorStateRegistry = deployBytecode(Blueprint.blueprintDeployerBytecode(vm.getCode("AnchorStateRegistry")), salt); + (blueprints.permissionedDisputeGame1, blueprints.permissionedDisputeGame2) = deployBigBytecode(vm.getCode("PermissionedDisputeGame"), salt); + vm.stopBroadcast(); + // forgefmt: disable-end + + opcm = createOPCMContract(_dii, _dio, blueprints, l1ContractsRelease); + } - vm.label(address(opcmProxy), "OPContractsManager"); - _dio.set(_dio.opcmProxy.selector, address(opcmProxy)); + vm.label(address(opcm), "OPContractsManager"); + _dio.set(_dio.opcm.selector, address(opcm)); } // --- Core Contracts --- function deploySystemConfigImpl(DeployImplementationsInput _dii, DeployImplementationsOutput _dio) public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); // Using snake case for contract name to match the TOML file in superchain-registry. 
string memory contractName = "system_config"; @@ -659,7 +525,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = ISystemConfig(existingImplementation); - } else if (isDevelopRelease(release)) { + } else { // Deploy a new implementation for development builds. vm.broadcast(msg.sender); impl = ISystemConfig( @@ -668,8 +534,6 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfig.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "SystemConfigImpl"); @@ -683,7 +547,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "l1_cross_domain_messenger"; IL1CrossDomainMessenger impl; @@ -691,7 +555,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IL1CrossDomainMessenger(existingImplementation); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = IL1CrossDomainMessenger( DeployUtils.create1({ @@ -699,8 +563,6 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1CrossDomainMessenger.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "L1CrossDomainMessengerImpl"); @@ -714,7 +576,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); 
string memory contractName = "l1_erc721_bridge"; IL1ERC721Bridge impl; @@ -722,7 +584,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IL1ERC721Bridge(existingImplementation); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = IL1ERC721Bridge( DeployUtils.create1({ @@ -730,8 +592,6 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ERC721Bridge.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "L1ERC721BridgeImpl"); @@ -745,7 +605,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "l1_standard_bridge"; IL1StandardBridge impl; @@ -753,7 +613,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IL1StandardBridge(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = IL1StandardBridge( DeployUtils.create1({ @@ -761,8 +621,6 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1StandardBridge.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "L1StandardBridgeImpl"); @@ -776,7 +634,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = 
"optimism_mintable_erc20_factory"; IOptimismMintableERC20Factory impl; @@ -784,7 +642,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IOptimismMintableERC20Factory(existingImplementation); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = IOptimismMintableERC20Factory( DeployUtils.create1({ @@ -792,32 +650,12 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(IOptimismMintableERC20Factory.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "OptimismMintableERC20FactoryImpl"); _dio.set(_dio.optimismMintableERC20FactoryImpl.selector, address(impl)); } - function deployOPContractsManagerImpl( - DeployImplementationsInput _dii, - DeployImplementationsOutput _dio - ) - public - virtual - { - ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); - IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); - - vm.broadcast(msg.sender); - // TODO: Eventually we will want to select the correct implementation based on the release. 
- OPContractsManager impl = new OPContractsManager(superchainConfigProxy, protocolVersionsProxy); - - vm.label(address(impl), "OPContractsManagerImpl"); - _dio.set(_dio.opcmImpl.selector, address(impl)); - } - // --- Fault Proofs Contracts --- // The fault proofs contracts are configured as follows: @@ -862,7 +700,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "optimism_portal"; IOptimismPortal2 impl; @@ -870,7 +708,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IOptimismPortal2(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { uint256 proofMaturityDelaySeconds = _dii.proofMaturityDelaySeconds(); uint256 disputeGameFinalityDelaySeconds = _dii.disputeGameFinalityDelaySeconds(); vm.broadcast(msg.sender); @@ -884,8 +722,6 @@ contract DeployImplementations is Script { ) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "OptimismPortalImpl"); @@ -893,7 +729,7 @@ contract DeployImplementations is Script { } function deployDelayedWETHImpl(DeployImplementationsInput _dii, DeployImplementationsOutput _dio) public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "delayed_weth"; IDelayedWETH impl; @@ -901,7 +737,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IDelayedWETH(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { 
uint256 withdrawalDelaySeconds = _dii.withdrawalDelaySeconds(); vm.broadcast(msg.sender); impl = IDelayedWETH( @@ -912,8 +748,6 @@ contract DeployImplementations is Script { ) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "DelayedWETHImpl"); @@ -927,7 +761,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "preimage_oracle"; IPreimageOracle singleton; @@ -935,7 +769,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { singleton = IPreimageOracle(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { uint256 minProposalSizeBytes = _dii.minProposalSizeBytes(); uint256 challengePeriodSeconds = _dii.challengePeriodSeconds(); vm.broadcast(msg.sender); @@ -947,8 +781,6 @@ contract DeployImplementations is Script { ) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(singleton), "PreimageOracleSingleton"); @@ -956,7 +788,7 @@ contract DeployImplementations is Script { } function deployMipsSingleton(DeployImplementationsInput _dii, DeployImplementationsOutput _dio) public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "mips"; IMIPS singleton; @@ -964,18 +796,16 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { singleton = IMIPS(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else 
{ uint256 mipsVersion = _dii.mipsVersion(); IPreimageOracle preimageOracle = IPreimageOracle(address(_dio.preimageOracleSingleton())); vm.broadcast(msg.sender); singleton = IMIPS( DeployUtils.create1({ - _name: mipsVersion == 1 ? "MIPS" : "MIPS2", + _name: mipsVersion == 1 ? "MIPS" : "MIPS64", _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS.__constructor__, (preimageOracle))) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(singleton), "MIPSSingleton"); @@ -989,7 +819,7 @@ contract DeployImplementations is Script { public virtual { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "dispute_game_factory"; IDisputeGameFactory impl; @@ -997,7 +827,7 @@ contract DeployImplementations is Script { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IDisputeGameFactory(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = IDisputeGameFactory( DeployUtils.create1({ @@ -1005,8 +835,6 @@ contract DeployImplementations is Script { _args: DeployUtils.encodeConstructor(abi.encodeCall(IDisputeGameFactory.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "DisputeGameFactoryImpl"); @@ -1076,11 +904,6 @@ contract DeployImplementations is Script { } } } - - // A release is considered a 'develop' release if it does not start with 'op-contracts'. 
- function isDevelopRelease(string memory _release) internal pure returns (bool) { - return !LibString.startsWith(_release, "op-contracts"); - } } // Similar to how DeploySuperchain.s.sol contains a lot of comments to thoroughly document the script @@ -1120,36 +943,35 @@ contract DeployImplementationsInterop is DeployImplementations { DeployImplementationsInput _dii, DeployImplementationsOutput _dio, OPContractsManager.Blueprints memory _blueprints, - string memory _release, - OPContractsManager.ImplementationSetter[] memory _setters + string memory _l1ContractsRelease ) internal + virtual override - returns (OPContractsManager opcmProxy_) + returns (OPContractsManager opcm_) { - address opcmProxyOwner = _dii.opcmProxyOwner(); + ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); + IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); + + OPContractsManager.Implementations memory implementations = OPContractsManager.Implementations({ + l1ERC721BridgeImpl: address(_dio.l1ERC721BridgeImpl()), + optimismPortalImpl: address(_dio.optimismPortalImpl()), + systemConfigImpl: address(_dio.systemConfigImpl()), + optimismMintableERC20FactoryImpl: address(_dio.optimismMintableERC20FactoryImpl()), + l1CrossDomainMessengerImpl: address(_dio.l1CrossDomainMessengerImpl()), + l1StandardBridgeImpl: address(_dio.l1StandardBridgeImpl()), + disputeGameFactoryImpl: address(_dio.disputeGameFactoryImpl()), + delayedWETHImpl: address(_dio.delayedWETHImpl()), + mipsImpl: address(_dio.mipsSingleton()) + }); vm.broadcast(msg.sender); - IProxy proxy = IProxy( - DeployUtils.create1({ - _name: "Proxy", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (msg.sender))) - }) + opcm_ = new OPContractsManagerInterop( + superchainConfigProxy, protocolVersionsProxy, _l1ContractsRelease, _blueprints, implementations ); - deployOPContractsManagerImpl(_dii, _dio); // overriding function - OPContractsManager opcmImpl = _dio.opcmImpl(); - - 
OPContractsManager.InitializerInputs memory initializerInputs = - OPContractsManager.InitializerInputs(_blueprints, _setters, _release, true); - - vm.startBroadcast(msg.sender); - proxy.upgradeToAndCall(address(opcmImpl), abi.encodeCall(opcmImpl.initialize, (initializerInputs))); - - proxy.changeAdmin(opcmProxyOwner); // transfer ownership of Proxy contract to the ProxyAdmin contract - vm.stopBroadcast(); - - opcmProxy_ = OPContractsManagerInterop(address(proxy)); + vm.label(address(opcm_), "OPContractsManager"); + _dio.set(_dio.opcm.selector, address(opcm_)); } function deployOptimismPortalImpl( @@ -1159,7 +981,7 @@ contract DeployImplementationsInterop is DeployImplementations { public override { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "optimism_portal"; IOptimismPortalInterop impl; @@ -1167,7 +989,7 @@ contract DeployImplementationsInterop is DeployImplementations { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = IOptimismPortalInterop(payable(existingImplementation)); - } else if (isDevelopRelease(release)) { + } else { uint256 proofMaturityDelaySeconds = _dii.proofMaturityDelaySeconds(); uint256 disputeGameFinalityDelaySeconds = _dii.disputeGameFinalityDelaySeconds(); vm.broadcast(msg.sender); @@ -1182,8 +1004,6 @@ contract DeployImplementationsInterop is DeployImplementations { ) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "OptimismPortalImpl"); @@ -1197,7 +1017,7 @@ contract DeployImplementationsInterop is DeployImplementations { public override { - string memory release = _dii.release(); + string memory release = _dii.l1ContractsRelease(); string memory stdVerToml = _dii.standardVersionsToml(); string memory contractName = "system_config"; @@ 
-1206,7 +1026,7 @@ contract DeployImplementationsInterop is DeployImplementations { address existingImplementation = getReleaseAddress(release, contractName, stdVerToml); if (existingImplementation != address(0)) { impl = ISystemConfigInterop(existingImplementation); - } else if (isDevelopRelease(release)) { + } else { vm.broadcast(msg.sender); impl = ISystemConfigInterop( DeployUtils.create1({ @@ -1214,46 +1034,9 @@ contract DeployImplementationsInterop is DeployImplementations { _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfigInterop.__constructor__, ())) }) ); - } else { - revert(string.concat("DeployImplementations: failed to deploy release ", release)); } vm.label(address(impl), "SystemConfigImpl"); _dio.set(_dio.systemConfigImpl.selector, address(impl)); } - - function deployOPContractsManagerImpl( - DeployImplementationsInput _dii, - DeployImplementationsOutput _dio - ) - public - override - { - ISuperchainConfig superchainConfigProxy = _dii.superchainConfigProxy(); - IProtocolVersions protocolVersionsProxy = _dii.protocolVersionsProxy(); - - vm.broadcast(msg.sender); - // TODO: Eventually we will want to select the correct implementation based on the release. 
- OPContractsManager impl = new OPContractsManagerInterop(superchainConfigProxy, protocolVersionsProxy); - - vm.label(address(impl), "OPContractsManagerImpl"); - _dio.set(_dio.opcmImpl.selector, address(impl)); - } - - function opcmSystemConfigSetter( - DeployImplementationsInput, - DeployImplementationsOutput _dio - ) - internal - view - override - returns (OPContractsManager.ImplementationSetter memory) - { - return OPContractsManager.ImplementationSetter({ - name: "SystemConfig", - info: OPContractsManager.Implementation( - address(_dio.systemConfigImpl()), ISystemConfigInterop.initialize.selector - ) - }); - } } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol index 5890630c9c2..09ef6d059cd 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol @@ -98,7 +98,7 @@ contract DeployMIPS is Script { vm.broadcast(msg.sender); singleton = IMIPS( DeployUtils.create1({ - _name: mipsVersion == 1 ? "MIPS" : "MIPS2", + _name: mipsVersion == 1 ? 
"MIPS" : "MIPS64", _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS.__constructor__, (preimageOracle))) }) ); diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol index eb3b346452e..a83dfb11062 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol @@ -47,7 +47,7 @@ contract DeployOPChainInput is BaseDeployIO { uint32 internal _basefeeScalar; uint32 internal _blobBaseFeeScalar; uint256 internal _l2ChainId; - OPContractsManager internal _opcmProxy; + OPContractsManager internal _opcm; string internal _saltMixer; uint64 internal _gasLimit; @@ -68,7 +68,7 @@ contract DeployOPChainInput is BaseDeployIO { else if (_sel == this.unsafeBlockSigner.selector) _unsafeBlockSigner = _addr; else if (_sel == this.proposer.selector) _proposer = _addr; else if (_sel == this.challenger.selector) _challenger = _addr; - else if (_sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(_addr); + else if (_sel == this.opcm.selector) _opcm = OPContractsManager(_addr); else revert("DeployOPChainInput: unknown selector"); } @@ -174,11 +174,10 @@ contract DeployOPChainInput is BaseDeployIO { return abi.encode(ScriptConstants.DEFAULT_STARTING_ANCHOR_ROOTS()); } - function opcmProxy() public returns (OPContractsManager) { - require(address(_opcmProxy) != address(0), "DeployOPChainInput: not set"); - DeployUtils.assertValidContractAddress(address(_opcmProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_opcmProxy)); - return _opcmProxy; + function opcm() public view returns (OPContractsManager) { + require(address(_opcm) != address(0), "DeployOPChainInput: not set"); + DeployUtils.assertValidContractAddress(address(_opcm)); + return _opcm; } function saltMixer() public view returns (string memory) { @@ -347,7 +346,7 @@ contract DeployOPChain is Script { // -------- Core Deployment Methods 
-------- function run(DeployOPChainInput _doi, DeployOPChainOutput _doo) public { - OPContractsManager opcmProxy = _doi.opcmProxy(); + OPContractsManager opcm = _doi.opcm(); OPContractsManager.Roles memory roles = OPContractsManager.Roles({ opChainProxyAdminOwner: _doi.opChainProxyAdminOwner(), @@ -374,7 +373,7 @@ contract DeployOPChain is Script { }); vm.broadcast(msg.sender); - OPContractsManager.DeployOutput memory deployOutput = opcmProxy.deploy(deployInput); + OPContractsManager.DeployOutput memory deployOutput = opcm.deploy(deployInput); vm.label(address(deployOutput.opChainProxyAdmin), "opChainProxyAdmin"); vm.label(address(deployOutput.addressManager), "addressManager"); @@ -480,9 +479,9 @@ contract DeployOPChain is Script { "DPG-20" ); - OPContractsManager opcm = _doi.opcmProxy(); - (address mips,) = opcm.implementations(opcm.latestRelease(), "MIPS"); - require(game.vm() == IBigStepper(mips), "DPG-30"); + OPContractsManager opcm = _doi.opcm(); + address mipsImpl = opcm.implementations().mipsImpl; + require(game.vm() == IBigStepper(mipsImpl), "DPG-30"); require(address(game.weth()) == address(_doo.delayedWETHPermissionedGameProxy()), "DPG-40"); require(address(game.anchorStateRegistry()) == address(_doo.anchorStateRegistryProxy()), "DPG-50"); @@ -552,9 +551,7 @@ contract DeployOPChain is Script { require(outputConfig.maximumBaseFee == rConfig.maximumBaseFee, "SYSCON-130"); require(systemConfig.startBlock() == block.number, "SYSCON-140"); - require( - systemConfig.batchInbox() == _doi.opcmProxy().chainIdToBatchInboxAddress(_doi.l2ChainId()), "SYSCON-150" - ); + require(systemConfig.batchInbox() == _doi.opcm().chainIdToBatchInboxAddress(_doi.l2ChainId()), "SYSCON-150"); require(systemConfig.l1CrossDomainMessenger() == address(_doo.l1CrossDomainMessengerProxy()), "SYSCON-160"); require(systemConfig.l1ERC721Bridge() == address(_doo.l1ERC721BridgeProxy()), "SYSCON-170"); @@ -579,7 +576,7 @@ contract DeployOPChain is Script { require(address(messenger.PORTAL()) 
== address(_doo.optimismPortalProxy()), "L1xDM-30"); require(address(messenger.portal()) == address(_doo.optimismPortalProxy()), "L1xDM-40"); - require(address(messenger.superchainConfig()) == address(_doi.opcmProxy().superchainConfig()), "L1xDM-50"); + require(address(messenger.superchainConfig()) == address(_doi.opcm().superchainConfig()), "L1xDM-50"); bytes32 xdmSenderSlot = vm.load(address(messenger), bytes32(uint256(204))); require(address(uint160(uint256(xdmSenderSlot))) == Constants.DEFAULT_L2_SENDER, "L1xDM-60"); @@ -595,7 +592,7 @@ contract DeployOPChain is Script { require(address(bridge.messenger()) == address(messenger), "L1SB-20"); require(address(bridge.OTHER_BRIDGE()) == Predeploys.L2_STANDARD_BRIDGE, "L1SB-30"); require(address(bridge.otherBridge()) == Predeploys.L2_STANDARD_BRIDGE, "L1SB-40"); - require(address(bridge.superchainConfig()) == address(_doi.opcmProxy().superchainConfig()), "L1SB-50"); + require(address(bridge.superchainConfig()) == address(_doi.opcm().superchainConfig()), "L1SB-50"); } function assertValidOptimismMintableERC20Factory(DeployOPChainInput, DeployOPChainOutput _doo) internal { @@ -617,12 +614,12 @@ contract DeployOPChain is Script { require(address(bridge.MESSENGER()) == address(_doo.l1CrossDomainMessengerProxy()), "L721B-30"); require(address(bridge.messenger()) == address(_doo.l1CrossDomainMessengerProxy()), "L721B-40"); - require(address(bridge.superchainConfig()) == address(_doi.opcmProxy().superchainConfig()), "L721B-50"); + require(address(bridge.superchainConfig()) == address(_doi.opcm().superchainConfig()), "L721B-50"); } function assertValidOptimismPortal(DeployOPChainInput _doi, DeployOPChainOutput _doo) internal { IOptimismPortal2 portal = _doo.optimismPortalProxy(); - ISuperchainConfig superchainConfig = ISuperchainConfig(address(_doi.opcmProxy().superchainConfig())); + ISuperchainConfig superchainConfig = ISuperchainConfig(address(_doi.opcm().superchainConfig())); require(address(portal.disputeGameFactory()) 
== address(_doo.disputeGameFactoryProxy()), "PORTAL-10"); require(address(portal.systemConfig()) == address(_doo.systemConfigProxy()), "PORTAL-20"); diff --git a/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol b/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol index 74492556e1b..5e35e8848c8 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol @@ -60,9 +60,9 @@ import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; // we use variable names that are shorthand for the full contract names, for example: // - `dsi` for DeploySuperchainInput // - `dso` for DeploySuperchainOutput -// - `dio` for DeployImplementationsInput +// - `dii` for DeployImplementationsInput // - `dio` for DeployImplementationsOutput -// - `doo` for DeployOPChainInput +// - `doi` for DeployOPChainInput // - `doo` for DeployOPChainOutput // - etc. diff --git a/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol b/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol index 3992cf1cb65..35aefbed16d 100644 --- a/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol @@ -12,29 +12,18 @@ import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; import { IStaticL1ChugSplashProxy } from "src/legacy/interfaces/IL1ChugSplashProxy.sol"; contract ReadImplementationAddressesInput is DeployOPChainOutput { - OPContractsManager internal _opcmProxy; - string internal _release; + OPContractsManager internal _opcm; function set(bytes4 _sel, address _addr) public override { require(_addr != address(0), "ReadImplementationAddressesInput: cannot set zero address"); - if (_sel == this.opcmProxy.selector) _opcmProxy = OPContractsManager(_addr); + if (_sel == this.opcm.selector) _opcm = OPContractsManager(_addr); else if 
(_sel == this.addressManager.selector) _addressManager = IAddressManager(_addr); else super.set(_sel, _addr); } - function set(bytes4 _sel, string memory _val) public { - if (_sel == this.release.selector) _release = _val; - else revert("ReadImplementationAddressesInput: unknown selector"); - } - - function opcmProxy() public view returns (OPContractsManager) { - DeployUtils.assertValidContractAddress(address(_opcmProxy)); - return _opcmProxy; - } - - function release() public view returns (string memory) { - require(bytes(_release).length != 0, "ReadImplementationAddressesInput: release not set"); - return _release; + function opcm() public view returns (OPContractsManager) { + DeployUtils.assertValidContractAddress(address(_opcm)); + return _opcm; } } @@ -154,9 +143,12 @@ contract ReadImplementationAddresses is Script { vm.prank(address(0)); _rio.set(_rio.l1StandardBridge.selector, l1SBImpl); - (address mipsLogic,) = _rii.opcmProxy().implementations(_rii.release(), "MIPS"); + address mipsLogic = _rii.opcm().implementations().mipsImpl; _rio.set(_rio.mipsSingleton.selector, mipsLogic); + address delayedWETH = _rii.opcm().implementations().delayedWETHImpl; + _rio.set(_rio.delayedWETH.selector, delayedWETH); + IAddressManager am = _rii.addressManager(); _rio.set(_rio.l1CrossDomainMessenger.selector, am.getAddress("OVM_L1CrossDomainMessenger")); diff --git a/packages/contracts-bedrock/scripts/getting-started/wallets.sh b/packages/contracts-bedrock/scripts/getting-started/wallets.sh index 1d3ebfc6bbd..36a707c431c 100755 --- a/packages/contracts-bedrock/scripts/getting-started/wallets.sh +++ b/packages/contracts-bedrock/scripts/getting-started/wallets.sh @@ -10,18 +10,21 @@ wallet1=$(cast wallet new) wallet2=$(cast wallet new) wallet3=$(cast wallet new) wallet4=$(cast wallet new) +wallet5=$(cast wallet new) # Grab wallet addresses address1=$(echo "$wallet1" | awk '/Address/ { print $2 }') address2=$(echo "$wallet2" | awk '/Address/ { print $2 }') address3=$(echo 
"$wallet3" | awk '/Address/ { print $2 }') address4=$(echo "$wallet4" | awk '/Address/ { print $2 }') +address5=$(echo "$wallet5" | awk '/Address/ { print $2 }') # Grab wallet private keys key1=$(echo "$wallet1" | awk '/Private key/ { print $3 }') key2=$(echo "$wallet2" | awk '/Private key/ { print $3 }') key3=$(echo "$wallet3" | awk '/Private key/ { print $3 }') key4=$(echo "$wallet4" | awk '/Private key/ { print $3 }') +key5=$(echo "$wallet5" | awk '/Private key/ { print $3 }') # Print out the environment variables to copy echo "# Copy the following into your .envrc file:" @@ -41,3 +44,7 @@ echo echo "# Sequencer account" echo "export GS_SEQUENCER_ADDRESS=$address4" echo "export GS_SEQUENCER_PRIVATE_KEY=$key4" +echo +echo "# Challenger account" +echo "export GS_CHALLENGER_ADDRESS=$address5" +echo "export GS_CHALLENGER_PRIVATE_KEY=$key5" diff --git a/packages/contracts-bedrock/snapshots/abi/AttestationStation.json b/packages/contracts-bedrock/snapshots/abi/AttestationStation.json deleted file mode 100644 index ba7d5f9759e..00000000000 --- a/packages/contracts-bedrock/snapshots/abi/AttestationStation.json +++ /dev/null @@ -1,128 +0,0 @@ -[ - { - "inputs": [ - { - "components": [ - { - "internalType": "address", - "name": "about", - "type": "address" - }, - { - "internalType": "bytes32", - "name": "key", - "type": "bytes32" - }, - { - "internalType": "bytes", - "name": "val", - "type": "bytes" - } - ], - "internalType": "struct AttestationStation.AttestationData[]", - "name": "_attestations", - "type": "tuple[]" - } - ], - "name": "attest", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_about", - "type": "address" - }, - { - "internalType": "bytes32", - "name": "_key", - "type": "bytes32" - }, - { - "internalType": "bytes", - "name": "_val", - "type": "bytes" - } - ], - "name": "attest", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - 
"inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - }, - { - "internalType": "address", - "name": "", - "type": "address" - }, - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "name": "attestations", - "outputs": [ - { - "internalType": "bytes", - "name": "", - "type": "bytes" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "creator", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "about", - "type": "address" - }, - { - "indexed": true, - "internalType": "bytes32", - "name": "key", - "type": "bytes32" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "val", - "type": "bytes" - } - ], - "name": "AttestationCreated", - "type": "event" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/DelayedVetoable.json b/packages/contracts-bedrock/snapshots/abi/DelayedVetoable.json deleted file mode 100644 index d76d1c8b108..00000000000 --- a/packages/contracts-bedrock/snapshots/abi/DelayedVetoable.json +++ /dev/null @@ -1,207 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "address", - "name": "_vetoer", - "type": "address" - }, - { - "internalType": "address", - "name": "_initiator", - "type": "address" - }, - { - "internalType": "address", - "name": "_target", - "type": "address" - }, - { - "internalType": "uint256", - "name": "_operatingDelay", - "type": "uint256" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "stateMutability": "nonpayable", - "type": "fallback" - }, - { - "inputs": [], - "name": "delay", - "outputs": [ - { - "internalType": "uint256", - "name": "delay_", - "type": "uint256" - } 
- ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "initiator", - "outputs": [ - { - "internalType": "address", - "name": "initiator_", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "_callHash", - "type": "bytes32" - } - ], - "name": "queuedAt", - "outputs": [ - { - "internalType": "uint256", - "name": "queuedAt_", - "type": "uint256" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "target", - "outputs": [ - { - "internalType": "address", - "name": "target_", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "vetoer", - "outputs": [ - { - "internalType": "address", - "name": "vetoer_", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint256", - "name": "delay", - "type": "uint256" - } - ], - "name": "DelayActivated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "callHash", - "type": "bytes32" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "data", - "type": "bytes" - } - ], - "name": "Forwarded", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": "callHash", - "type": "bytes32" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "data", - "type": "bytes" - } - ], - "name": "Initiated", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "bytes32", - "name": 
"callHash", - "type": "bytes32" - }, - { - "indexed": false, - "internalType": "bytes", - "name": "data", - "type": "bytes" - } - ], - "name": "Vetoed", - "type": "event" - }, - { - "inputs": [], - "name": "ForwardingEarly", - "type": "error" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "expected", - "type": "address" - }, - { - "internalType": "address", - "name": "actual", - "type": "address" - } - ], - "name": "Unauthorized", - "type": "error" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 7c478feb235..b5758eca610 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -10,6 +10,110 @@ "internalType": "contract IProtocolVersions", "name": "_protocolVersions", "type": "address" + }, + { + "internalType": "string", + "name": "_l1ContractsRelease", + "type": "string" + }, + { + "components": [ + { + "internalType": "address", + "name": "addressManager", + "type": "address" + }, + { + "internalType": "address", + "name": "proxy", + "type": "address" + }, + { + "internalType": "address", + "name": "proxyAdmin", + "type": "address" + }, + { + "internalType": "address", + "name": "l1ChugSplashProxy", + "type": "address" + }, + { + "internalType": "address", + "name": "resolvedDelegateProxy", + "type": "address" + }, + { + "internalType": "address", + "name": "anchorStateRegistry", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame1", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame2", + "type": "address" + } + ], + "internalType": "struct OPContractsManager.Blueprints", + "name": "_blueprints", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "address", + "name": "l1ERC721BridgeImpl", + "type": "address" + }, + { + 
"internalType": "address", + "name": "optimismPortalImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "systemConfigImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1StandardBridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "disputeGameFactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "delayedWETHImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "mipsImpl", + "type": "address" + } + ], + "internalType": "struct OPContractsManager.Implementations", + "name": "_implementations", + "type": "tuple" } ], "stateMutability": "nonpayable", @@ -298,138 +402,68 @@ "type": "function" }, { - "inputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - }, - { - "internalType": "string", - "name": "", - "type": "string" - } - ], + "inputs": [], "name": "implementations", "outputs": [ - { - "internalType": "address", - "name": "logic", - "type": "address" - }, - { - "internalType": "bytes4", - "name": "initializer", - "type": "bytes4" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ { "components": [ { - "components": [ - { - "internalType": "address", - "name": "addressManager", - "type": "address" - }, - { - "internalType": "address", - "name": "proxy", - "type": "address" - }, - { - "internalType": "address", - "name": "proxyAdmin", - "type": "address" - }, - { - "internalType": "address", - "name": "l1ChugSplashProxy", - "type": "address" - }, - { - "internalType": "address", - "name": "resolvedDelegateProxy", - "type": "address" - }, - { - "internalType": "address", - "name": "anchorStateRegistry", - "type": "address" - }, - { - "internalType": "address", - "name": 
"permissionedDisputeGame1", - "type": "address" - }, - { - "internalType": "address", - "name": "permissionedDisputeGame2", - "type": "address" - } - ], - "internalType": "struct OPContractsManager.Blueprints", - "name": "blueprints", - "type": "tuple" + "internalType": "address", + "name": "l1ERC721BridgeImpl", + "type": "address" }, { - "components": [ - { - "internalType": "string", - "name": "name", - "type": "string" - }, - { - "components": [ - { - "internalType": "address", - "name": "logic", - "type": "address" - }, - { - "internalType": "bytes4", - "name": "initializer", - "type": "bytes4" - } - ], - "internalType": "struct OPContractsManager.Implementation", - "name": "info", - "type": "tuple" - } - ], - "internalType": "struct OPContractsManager.ImplementationSetter[]", - "name": "setters", - "type": "tuple[]" + "internalType": "address", + "name": "optimismPortalImpl", + "type": "address" }, { - "internalType": "string", - "name": "release", - "type": "string" + "internalType": "address", + "name": "systemConfigImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1StandardBridgeImpl", + "type": "address" }, { - "internalType": "bool", - "name": "isLatest", - "type": "bool" + "internalType": "address", + "name": "disputeGameFactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "delayedWETHImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "mipsImpl", + "type": "address" } ], - "internalType": "struct OPContractsManager.InitializerInputs", - "name": "_initializerInputs", + "internalType": "struct OPContractsManager.Implementations", + "name": "", "type": "tuple" } ], - "name": "initialize", - "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "view", "type": 
"function" }, { "inputs": [], - "name": "latestRelease", + "name": "l1ContractsRelease", "outputs": [ { "internalType": "string", @@ -529,19 +563,6 @@ "name": "Deployed", "type": "event" }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint8", - "name": "version", - "type": "uint8" - } - ], - "name": "Initialized", - "type": "event" - }, { "inputs": [ { diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json index 7c478feb235..b5758eca610 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInterop.json @@ -10,6 +10,110 @@ "internalType": "contract IProtocolVersions", "name": "_protocolVersions", "type": "address" + }, + { + "internalType": "string", + "name": "_l1ContractsRelease", + "type": "string" + }, + { + "components": [ + { + "internalType": "address", + "name": "addressManager", + "type": "address" + }, + { + "internalType": "address", + "name": "proxy", + "type": "address" + }, + { + "internalType": "address", + "name": "proxyAdmin", + "type": "address" + }, + { + "internalType": "address", + "name": "l1ChugSplashProxy", + "type": "address" + }, + { + "internalType": "address", + "name": "resolvedDelegateProxy", + "type": "address" + }, + { + "internalType": "address", + "name": "anchorStateRegistry", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame1", + "type": "address" + }, + { + "internalType": "address", + "name": "permissionedDisputeGame2", + "type": "address" + } + ], + "internalType": "struct OPContractsManager.Blueprints", + "name": "_blueprints", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "address", + "name": "l1ERC721BridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismPortalImpl", + "type": "address" + }, + 
{ + "internalType": "address", + "name": "systemConfigImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1StandardBridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "disputeGameFactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "delayedWETHImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "mipsImpl", + "type": "address" + } + ], + "internalType": "struct OPContractsManager.Implementations", + "name": "_implementations", + "type": "tuple" } ], "stateMutability": "nonpayable", @@ -298,138 +402,68 @@ "type": "function" }, { - "inputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - }, - { - "internalType": "string", - "name": "", - "type": "string" - } - ], + "inputs": [], "name": "implementations", "outputs": [ - { - "internalType": "address", - "name": "logic", - "type": "address" - }, - { - "internalType": "bytes4", - "name": "initializer", - "type": "bytes4" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ { "components": [ { - "components": [ - { - "internalType": "address", - "name": "addressManager", - "type": "address" - }, - { - "internalType": "address", - "name": "proxy", - "type": "address" - }, - { - "internalType": "address", - "name": "proxyAdmin", - "type": "address" - }, - { - "internalType": "address", - "name": "l1ChugSplashProxy", - "type": "address" - }, - { - "internalType": "address", - "name": "resolvedDelegateProxy", - "type": "address" - }, - { - "internalType": "address", - "name": "anchorStateRegistry", - "type": "address" - }, - { - "internalType": "address", - "name": "permissionedDisputeGame1", - "type": "address" - }, - { - "internalType": "address", - "name": 
"permissionedDisputeGame2", - "type": "address" - } - ], - "internalType": "struct OPContractsManager.Blueprints", - "name": "blueprints", - "type": "tuple" + "internalType": "address", + "name": "l1ERC721BridgeImpl", + "type": "address" }, { - "components": [ - { - "internalType": "string", - "name": "name", - "type": "string" - }, - { - "components": [ - { - "internalType": "address", - "name": "logic", - "type": "address" - }, - { - "internalType": "bytes4", - "name": "initializer", - "type": "bytes4" - } - ], - "internalType": "struct OPContractsManager.Implementation", - "name": "info", - "type": "tuple" - } - ], - "internalType": "struct OPContractsManager.ImplementationSetter[]", - "name": "setters", - "type": "tuple[]" + "internalType": "address", + "name": "optimismPortalImpl", + "type": "address" }, { - "internalType": "string", - "name": "release", - "type": "string" + "internalType": "address", + "name": "systemConfigImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1StandardBridgeImpl", + "type": "address" }, { - "internalType": "bool", - "name": "isLatest", - "type": "bool" + "internalType": "address", + "name": "disputeGameFactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "delayedWETHImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "mipsImpl", + "type": "address" } ], - "internalType": "struct OPContractsManager.InitializerInputs", - "name": "_initializerInputs", + "internalType": "struct OPContractsManager.Implementations", + "name": "", "type": "tuple" } ], - "name": "initialize", - "outputs": [], - "stateMutability": "nonpayable", + "stateMutability": "view", "type": "function" }, { "inputs": [], - "name": "latestRelease", + "name": "l1ContractsRelease", "outputs": 
[ { "internalType": "string", @@ -529,19 +563,6 @@ "name": "Deployed", "type": "event" }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint8", - "name": "version", - "type": "uint8" - } - ], - "name": "Initialized", - "type": "event" - }, { "inputs": [ { diff --git a/packages/contracts-bedrock/snapshots/abi/Optimist.json b/packages/contracts-bedrock/snapshots/abi/Optimist.json deleted file mode 100644 index 96bbc0591a3..00000000000 --- a/packages/contracts-bedrock/snapshots/abi/Optimist.json +++ /dev/null @@ -1,536 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "string", - "name": "_name", - "type": "string" - }, - { - "internalType": "string", - "name": "_symbol", - "type": "string" - }, - { - "internalType": "address", - "name": "_baseURIAttestor", - "type": "address" - }, - { - "internalType": "contract AttestationStation", - "name": "_attestationStation", - "type": "address" - }, - { - "internalType": "contract OptimistAllowlist", - "name": "_optimistAllowlist", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "ATTESTATION_STATION", - "outputs": [ - { - "internalType": "contract AttestationStation", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "BASE_URI_ATTESTATION_KEY", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "BASE_URI_ATTESTOR", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "OPTIMIST_ALLOWLIST", - "outputs": [ - { - "internalType": "contract OptimistAllowlist", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": 
"address", - "name": "", - "type": "address" - }, - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "name": "approve", - "outputs": [], - "stateMutability": "pure", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "owner", - "type": "address" - } - ], - "name": "balanceOf", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "baseURI", - "outputs": [ - { - "internalType": "string", - "name": "uri_", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "burn", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "getApproved", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "string", - "name": "_name", - "type": "string" - }, - { - "internalType": "string", - "name": "_symbol", - "type": "string" - } - ], - "name": "initialize", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "internalType": "address", - "name": "operator", - "type": "address" - } - ], - "name": "isApprovedForAll", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_recipient", - "type": "address" - } - ], - "name": "isOnAllowList", - "outputs": [ - { - "internalType": "bool", - "name": "allowed_", - "type": "bool" - } - ], - 
"stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_recipient", - "type": "address" - } - ], - "name": "mint", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "name", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "ownerOf", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "from", - "type": "address" - }, - { - "internalType": "address", - "name": "to", - "type": "address" - }, - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "safeTransferFrom", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "from", - "type": "address" - }, - { - "internalType": "address", - "name": "to", - "type": "address" - }, - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - }, - { - "internalType": "bytes", - "name": "data", - "type": "bytes" - } - ], - "name": "safeTransferFrom", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - }, - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "name": "setApprovalForAll", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes4", - "name": "interfaceId", - "type": "bytes4" - } - ], - "name": "supportsInterface", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - 
"stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "symbol", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_owner", - "type": "address" - } - ], - "name": "tokenIdOfAddress", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "pure", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "_tokenId", - "type": "uint256" - } - ], - "name": "tokenURI", - "outputs": [ - { - "internalType": "string", - "name": "uri_", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "from", - "type": "address" - }, - { - "internalType": "address", - "name": "to", - "type": "address" - }, - { - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "transferFrom", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "approved", - "type": "address" - }, - { - "indexed": true, - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "Approval", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "owner", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "operator", - "type": "address" - }, - { - "indexed": false, - "internalType": "bool", - 
"name": "approved", - "type": "bool" - } - ], - "name": "ApprovalForAll", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint8", - "name": "version", - "type": "uint8" - } - ], - "name": "Initialized", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "from", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "to", - "type": "address" - }, - { - "indexed": true, - "internalType": "uint256", - "name": "tokenId", - "type": "uint256" - } - ], - "name": "Transfer", - "type": "event" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OptimistAllowlist.json b/packages/contracts-bedrock/snapshots/abi/OptimistAllowlist.json deleted file mode 100644 index 87ac8f8a014..00000000000 --- a/packages/contracts-bedrock/snapshots/abi/OptimistAllowlist.json +++ /dev/null @@ -1,138 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "contract AttestationStation", - "name": "_attestationStation", - "type": "address" - }, - { - "internalType": "address", - "name": "_allowlistAttestor", - "type": "address" - }, - { - "internalType": "address", - "name": "_coinbaseQuestAttestor", - "type": "address" - }, - { - "internalType": "address", - "name": "_optimistInviter", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "ALLOWLIST_ATTESTOR", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "ATTESTATION_STATION", - "outputs": [ - { - "internalType": "contract AttestationStation", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "COINBASE_QUEST_ATTESTOR", - "outputs": [ - { - "internalType": "address", - "name": "", - 
"type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "OPTIMIST_CAN_MINT_ATTESTATION_KEY", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "OPTIMIST_INVITER", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_claimer", - "type": "address" - } - ], - "name": "isAllowedToMint", - "outputs": [ - { - "internalType": "bool", - "name": "allowed_", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OptimistInviter.json b/packages/contracts-bedrock/snapshots/abi/OptimistInviter.json deleted file mode 100644 index a5300b20a3c..00000000000 --- a/packages/contracts-bedrock/snapshots/abi/OptimistInviter.json +++ /dev/null @@ -1,282 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "address", - "name": "_inviteGranter", - "type": "address" - }, - { - "internalType": "contract AttestationStation", - "name": "_attestationStation", - "type": "address" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [], - "name": "ATTESTATION_STATION", - "outputs": [ - { - "internalType": "contract AttestationStation", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" 
- }, - { - "inputs": [], - "name": "CAN_INVITE_ATTESTATION_KEY", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "CLAIMABLE_INVITE_TYPEHASH", - "outputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "EIP712_VERSION", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "INVITE_GRANTER", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "MIN_COMMITMENT_PERIOD", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "_claimer", - "type": "address" - }, - { - "components": [ - { - "internalType": "address", - "name": "issuer", - "type": "address" - }, - { - "internalType": "bytes32", - "name": "nonce", - "type": "bytes32" - } - ], - "internalType": "struct OptimistInviter.ClaimableInvite", - "name": "_claimableInvite", - "type": "tuple" - }, - { - "internalType": "bytes", - "name": "_signature", - "type": "bytes" - } - ], - "name": "claimInvite", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "_commitment", - "type": "bytes32" - } - ], - "name": "commitInvite", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "name": "commitmentTimestamps", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" 
- } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "string", - "name": "_name", - "type": "string" - } - ], - "name": "initialize", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "name": "inviteCounts", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address[]", - "name": "_accounts", - "type": "address[]" - }, - { - "internalType": "uint256", - "name": "_inviteCount", - "type": "uint256" - } - ], - "name": "setInviteCounts", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - }, - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "name": "usedNonces", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "version", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint8", - "name": "version", - "type": "uint8" - } - ], - "name": "Initialized", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "issuer", - "type": "address" - }, - { - "indexed": true, - "internalType": "address", - "name": "claimer", - "type": "address" - } - ], - "name": "InviteClaimed", - "type": "event" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/RISCV.json b/packages/contracts-bedrock/snapshots/abi/RISCV.json new file mode 100644 index 
00000000000..1650fd3980e --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/RISCV.json @@ -0,0 +1,68 @@ +[ + { + "inputs": [ + { + "internalType": "contract IPreimageOracle", + "name": "_oracle", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "oracle", + "outputs": [ + { + "internalType": "contract IPreimageOracle", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "_stateData", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "_proof", + "type": "bytes" + }, + { + "internalType": "bytes32", + "name": "_localContext", + "type": "bytes32" + } + ], + "name": "step", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index fb7b6d82439..d069a275393 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -1,11 +1,7 @@ { "src/L1/DataAvailabilityChallenge.sol": { - "initCodeHash": "0xbd00d6568abab3e7fc211c40d682862242f25493010a4a097bd1f3b45c8c87c3", - "sourceCodeHash": "0x58b587034a67b4bb718abbaded8ac23b082c0971105874bcc42c23f051c67f6e" - }, - "src/L1/DelayedVetoable.sol": { - "initCodeHash": "0x9fe8ade6f6332262ff1f3539ac0bf57660edbad3cf4c4cb230c2ddac18aa0a3f", - "sourceCodeHash": "0x30e83a535ef27b2e900c831c4e1a4ec2750195350011c4fdacda1da9db2d167b" + "initCodeHash": "0x240a9b695e1ab73692672b907c2ae147ee9224e95a03c02d99333afe186c3d2f", + "sourceCodeHash": 
"0xc6ab0e64cfbdcfa6de0a480426263712b3ddbabe56a88ec7c02556cb432e6b02" }, "src/L1/L1CrossDomainMessenger.sol": { "initCodeHash": "0x2e9cb3ceb5e55341b311f0666ef7655df4fafae75afdfbcd701cd9c9b2b017d5", @@ -24,8 +20,8 @@ "sourceCodeHash": "0x4132ff37d267cb12224b75ea806c0aa7d25407b0d66ce526d7fcda8f7d223882" }, "src/L1/OPContractsManager.sol": { - "initCodeHash": "0xd58cb3978affc5c1457cdd498ff8420c90aef804d4c3b62cf42ab2691986d6d2", - "sourceCodeHash": "0x7bfa6eff76176649fe600303cd60009a0f6e282cbaec55836b5ea1f8875cbeb5" + "initCodeHash": "0xd038cc35325d023499151264232d75fa4ecc81f04a8c8353e6b50c43af224d6e", + "sourceCodeHash": "0xa13f3ab2b8744015290dbabe5f20fdd44774607e6a7ad3e5e016303fc4aa8c12" }, "src/L1/OptimismPortal.sol": { "initCodeHash": "0x152167cfa18635ae4918a6eb3371a599cfa084418c0a652799cdb48bfc0ee0cc", @@ -48,12 +44,12 @@ "sourceCodeHash": "0x39489a85bc3a5c8560f82d41b31bf7fe22f5b648f4ed538f61695a73092ea9eb" }, "src/L1/SystemConfig.sol": { - "initCodeHash": "0x429058f75d97fa7a7d0166b59830909bc722324feefc40f2b41419d6335d3f37", - "sourceCodeHash": "0x5ca776041a4ddc0d28ec55db7012d669481cd4601b0e71dbd3493a67b8a7e5a5" + "initCodeHash": "0x0eda38e2fb2687a324289f04ec8ad0d2afe51f45219d074740fb4a0e24ea6569", + "sourceCodeHash": "0x6dbbe8716ca8cd2fba3da8dcae0ca0c4b1f3e9dd04220fb24a15666b23300927" }, "src/L1/SystemConfigInterop.sol": { - "initCodeHash": "0x277a61dcabed81a15739a8e9ed50615252bcc687cebea852e00191d0a1fbe11f", - "sourceCodeHash": "0x38361a4f70a19e1b7819e933932a0c9fd2bcebaaebcbc7942f5c00dfaa2c28df" + "initCodeHash": "0x443fd84f8dbc38f03e59a56b99099b5e4b28de3e860a5d16c1a21101745622a4", + "sourceCodeHash": "0x5c2e00cd8939a538eb38580d76e70d27dd1e8e6cd9328e1978468981017736e6" }, "src/L2/BaseFeeVault.sol": { "initCodeHash": "0xbf49824cf37e201181484a8a423fcad8f504dc925921a2b28e83398197858dec", @@ -139,6 +135,10 @@ "initCodeHash": "0x17ea1b1c5d5a622d51c2961fde886a5498de63584e654ed1d69ee80dddbe0b17", "sourceCodeHash": 
"0x0fa0633a769e73f5937514c0003ba7947a1c275bbe5b85d78879c42f0ed8895b" }, + "src/asterisc/RISCV.sol": { + "initCodeHash": "0x6b4323061187f2c8efe8de43bf1ecdc0798e2d95ad69470ed4151dadc094fedf", + "sourceCodeHash": "0xd824f1ead87a1214fa8a4b435f493a80b7340ec2c959d2c1e1e9e8c062a42c4a" + }, "src/cannon/MIPS.sol": { "initCodeHash": "0xa3cbf121bad13c00227ea4fef128853d9a86b7ec9158de894f99b58d38d7630a", "sourceCodeHash": "0xd8467700c80b3e62fa37193dc6513bac35282094b686b50e162e157f704dde00" @@ -183,22 +183,6 @@ "initCodeHash": "0xefc6ed9e325c2d614ea0d28c3eabfff1b345f7c6054e90253c6a091c29508267", "sourceCodeHash": "0xaa08a61448f485b277af57251d2089cc6a80ce0a763bf7184d48ffed5034ef69" }, - "src/periphery/op-nft/AttestationStation.sol": { - "initCodeHash": "0x2e665d9ee554430980f64bcb6d2611a1cb03dbacfd58bb0d6f5d32951a267bde", - "sourceCodeHash": "0xe0bc805b22c7d04b5a9444cddd4c0e1bcb3006c69c03610494277ab2cc83f553" - }, - "src/periphery/op-nft/Optimist.sol": { - "initCodeHash": "0x8fccdef5fb6e6d51215b39acc449faad8ba15416699c9b3af77866f4297805a3", - "sourceCodeHash": "0xfa9354827b642803e10415ed30ca789be1bd23d88fac14f7adaa65c6eb1c1643" - }, - "src/periphery/op-nft/OptimistAllowlist.sol": { - "initCodeHash": "0x166dd3fc18cb238895f2faa7fdd635af48ce2c54e21ed2d6dae857c3731c4d6c", - "sourceCodeHash": "0x3a5f61046f729c9a70274b8b2a739382987ec5eb77705b259e8a3210a5f43462" - }, - "src/periphery/op-nft/OptimistInviter.sol": { - "initCodeHash": "0x28dfa6676702a7abd19609cc773158d1f958210bc0a38c008d67a002dc1df862", - "sourceCodeHash": "0x3a0a294932d6deba043f6a2b46b4e8477ee96e7fb054d7e7229a43ce4352c68d" - }, "src/safe/DeputyGuardianModule.sol": { "initCodeHash": "0xd95e562f395d4eb6e332f4474dffab660ada9e9da7c79f58fb6052278e0904df", "sourceCodeHash": "0x45daabe094de0287e244e6fea4f1887b9adc09b07c47dc77361b1678645a1470" diff --git a/packages/contracts-bedrock/snapshots/storageLayout/AttestationStation.json b/packages/contracts-bedrock/snapshots/storageLayout/AttestationStation.json deleted file mode 100644 
index c3c732cec14..00000000000 --- a/packages/contracts-bedrock/snapshots/storageLayout/AttestationStation.json +++ /dev/null @@ -1,9 +0,0 @@ -[ - { - "bytes": "32", - "label": "attestations", - "offset": 0, - "slot": "0", - "type": "mapping(address => mapping(address => mapping(bytes32 => bytes)))" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/DelayedVetoable.json b/packages/contracts-bedrock/snapshots/storageLayout/DelayedVetoable.json deleted file mode 100644 index 7da3cbbe5bd..00000000000 --- a/packages/contracts-bedrock/snapshots/storageLayout/DelayedVetoable.json +++ /dev/null @@ -1,16 +0,0 @@ -[ - { - "bytes": "32", - "label": "_delay", - "offset": 0, - "slot": "0", - "type": "uint256" - }, - { - "bytes": "32", - "label": "_queuedAt", - "offset": 0, - "slot": "1", - "type": "mapping(bytes32 => uint256)" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json index aeef539c5c2..aa8148b34cb 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json @@ -1,51 +1,30 @@ [ - { - "bytes": "1", - "label": "_initialized", - "offset": 0, - "slot": "0", - "type": "uint8" - }, - { - "bytes": "1", - "label": "_initializing", - "offset": 1, - "slot": "0", - "type": "bool" - }, { "bytes": "32", - "label": "latestRelease", + "label": "l1ContractsRelease", "offset": 0, - "slot": "1", + "slot": "0", "type": "string" }, - { - "bytes": "32", - "label": "implementations", - "offset": 0, - "slot": "2", - "type": "mapping(string => mapping(string => struct OPContractsManager.Implementation))" - }, { "bytes": "32", "label": "systemConfigs", "offset": 0, - "slot": "3", + "slot": "1", "type": "mapping(uint256 => contract ISystemConfig)" }, { "bytes": "256", "label": "blueprint", 
"offset": 0, - "slot": "4", + "slot": "2", "type": "struct OPContractsManager.Blueprints" }, { - "bytes": "1600", - "label": "__gap", + "bytes": "288", + "label": "implementation", "offset": 0, - "slot": "12", - "type": "uint256[50]" + "slot": "10", + "type": "struct OPContractsManager.Implementations" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json index aeef539c5c2..aa8148b34cb 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerInterop.json @@ -1,51 +1,30 @@ [ - { - "bytes": "1", - "label": "_initialized", - "offset": 0, - "slot": "0", - "type": "uint8" - }, - { - "bytes": "1", - "label": "_initializing", - "offset": 1, - "slot": "0", - "type": "bool" - }, { "bytes": "32", - "label": "latestRelease", + "label": "l1ContractsRelease", "offset": 0, - "slot": "1", + "slot": "0", "type": "string" }, - { - "bytes": "32", - "label": "implementations", - "offset": 0, - "slot": "2", - "type": "mapping(string => mapping(string => struct OPContractsManager.Implementation))" - }, { "bytes": "32", "label": "systemConfigs", "offset": 0, - "slot": "3", + "slot": "1", "type": "mapping(uint256 => contract ISystemConfig)" }, { "bytes": "256", "label": "blueprint", "offset": 0, - "slot": "4", + "slot": "2", "type": "struct OPContractsManager.Blueprints" }, { - "bytes": "1600", - "label": "__gap", + "bytes": "288", + "label": "implementation", "offset": 0, - "slot": "12", - "type": "uint256[50]" + "slot": "10", + "type": "struct OPContractsManager.Implementations" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/Optimist.json b/packages/contracts-bedrock/snapshots/storageLayout/Optimist.json deleted file mode 100644 index 6049beb5424..00000000000 --- 
a/packages/contracts-bedrock/snapshots/storageLayout/Optimist.json +++ /dev/null @@ -1,86 +0,0 @@ -[ - { - "bytes": "1", - "label": "_initialized", - "offset": 0, - "slot": "0", - "type": "uint8" - }, - { - "bytes": "1", - "label": "_initializing", - "offset": 1, - "slot": "0", - "type": "bool" - }, - { - "bytes": "1600", - "label": "__gap", - "offset": 0, - "slot": "1", - "type": "uint256[50]" - }, - { - "bytes": "1600", - "label": "__gap", - "offset": 0, - "slot": "51", - "type": "uint256[50]" - }, - { - "bytes": "32", - "label": "_name", - "offset": 0, - "slot": "101", - "type": "string" - }, - { - "bytes": "32", - "label": "_symbol", - "offset": 0, - "slot": "102", - "type": "string" - }, - { - "bytes": "32", - "label": "_owners", - "offset": 0, - "slot": "103", - "type": "mapping(uint256 => address)" - }, - { - "bytes": "32", - "label": "_balances", - "offset": 0, - "slot": "104", - "type": "mapping(address => uint256)" - }, - { - "bytes": "32", - "label": "_tokenApprovals", - "offset": 0, - "slot": "105", - "type": "mapping(uint256 => address)" - }, - { - "bytes": "32", - "label": "_operatorApprovals", - "offset": 0, - "slot": "106", - "type": "mapping(address => mapping(address => bool))" - }, - { - "bytes": "1408", - "label": "__gap", - "offset": 0, - "slot": "107", - "type": "uint256[44]" - }, - { - "bytes": "1600", - "label": "__gap", - "offset": 0, - "slot": "151", - "type": "uint256[50]" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OptimistAllowlist.json b/packages/contracts-bedrock/snapshots/storageLayout/OptimistAllowlist.json deleted file mode 100644 index 0637a088a01..00000000000 --- a/packages/contracts-bedrock/snapshots/storageLayout/OptimistAllowlist.json +++ /dev/null @@ -1 +0,0 @@ -[] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OptimistInviter.json b/packages/contracts-bedrock/snapshots/storageLayout/OptimistInviter.json deleted file mode 
100644 index 5d1a6bbc43c..00000000000 --- a/packages/contracts-bedrock/snapshots/storageLayout/OptimistInviter.json +++ /dev/null @@ -1,58 +0,0 @@ -[ - { - "bytes": "1", - "label": "_initialized", - "offset": 0, - "slot": "0", - "type": "uint8" - }, - { - "bytes": "1", - "label": "_initializing", - "offset": 1, - "slot": "0", - "type": "bool" - }, - { - "bytes": "32", - "label": "_HASHED_NAME", - "offset": 0, - "slot": "1", - "type": "bytes32" - }, - { - "bytes": "32", - "label": "_HASHED_VERSION", - "offset": 0, - "slot": "2", - "type": "bytes32" - }, - { - "bytes": "1600", - "label": "__gap", - "offset": 0, - "slot": "3", - "type": "uint256[50]" - }, - { - "bytes": "32", - "label": "commitmentTimestamps", - "offset": 0, - "slot": "53", - "type": "mapping(bytes32 => uint256)" - }, - { - "bytes": "32", - "label": "usedNonces", - "offset": 0, - "slot": "54", - "type": "mapping(address => mapping(bytes32 => bool))" - }, - { - "bytes": "32", - "label": "inviteCounts", - "offset": 0, - "slot": "55", - "type": "mapping(address => uint256)" - } -] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/RISCV.json b/packages/contracts-bedrock/snapshots/storageLayout/RISCV.json new file mode 100644 index 00000000000..a79dc13a1d3 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/RISCV.json @@ -0,0 +1,9 @@ +[ + { + "bytes": "20", + "label": "oracle", + "offset": 0, + "slot": "0", + "type": "contract IPreimageOracle" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol b/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol index 2a725fc4f20..666e4306b48 100644 --- a/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol +++ b/packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol @@ -24,9 +24,10 @@ enum CommitmentType { } /// @dev A struct representing a single DA challenge. -/// @custom:field status The status of the challenge. 
/// @custom:field challenger The address that initiated the challenge. +/// @custom:field lockedBond The amount of ETH bond that was locked by the challenger. /// @custom:field startBlock The block number at which the challenge was initiated. +/// @custom:field resolvedBlock The block number at which the challenge was resolved. struct Challenge { address challenger; uint256 lockedBond; @@ -94,8 +95,8 @@ contract DataAvailabilityChallenge is OwnableUpgradeable, ISemver { event BalanceChanged(address account, uint256 balance); /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.2 - string public constant version = "1.0.1-beta.2"; + /// @custom:semver 1.0.1-beta.3 + string public constant version = "1.0.1-beta.3"; /// @notice The fixed cost of resolving a challenge. /// @dev The value is estimated by measuring the cost of resolving with `bytes(0)` diff --git a/packages/contracts-bedrock/src/L1/DelayedVetoable.sol b/packages/contracts-bedrock/src/L1/DelayedVetoable.sol deleted file mode 100644 index d968af21497..00000000000 --- a/packages/contracts-bedrock/src/L1/DelayedVetoable.sol +++ /dev/null @@ -1,193 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -// Interfaces -import { ISemver } from "src/universal/interfaces/ISemver.sol"; - -/// @title DelayedVetoable -/// @notice This contract enables a delay before a call is forwarded to a target contract, and during the delay period -/// the call can be vetoed by the authorized vetoer. -/// This contract does not support value transfers, only data is forwarded. -/// Additionally, this contract cannot be used to forward calls with data beginning with the function selector -/// of the queuedAt(bytes32) function. This is because of input validation checks which solidity performs at -/// runtime on functions which take an argument. -contract DelayedVetoable is ISemver { - /// @notice Error for when attempting to forward too early. 
- error ForwardingEarly(); - - /// @notice Error for unauthorized calls. - error Unauthorized(address expected, address actual); - - /// @notice An event that is emitted when the delay is activated. - /// @param delay The delay that was activated. - event DelayActivated(uint256 delay); - - /// @notice An event that is emitted when a call is initiated. - /// @param callHash The hash of the call data. - /// @param data The data of the initiated call. - event Initiated(bytes32 indexed callHash, bytes data); - - /// @notice An event that is emitted each time a call is forwarded. - /// @param callHash The hash of the call data. - /// @param data The data forwarded to the target. - event Forwarded(bytes32 indexed callHash, bytes data); - - /// @notice An event that is emitted each time a call is vetoed. - /// @param callHash The hash of the call data. - /// @param data The data forwarded to the target. - event Vetoed(bytes32 indexed callHash, bytes data); - - /// @notice The address that all calls are forwarded to after the delay. - address internal immutable TARGET; - - /// @notice The address that can veto a call. - address internal immutable VETOER; - - /// @notice The address that can initiate a call. - address internal immutable INITIATOR; - - /// @notice The delay which will be set after the initial system deployment is completed. - uint256 internal immutable OPERATING_DELAY; - - /// @notice The current amount of time to wait before forwarding a call. - uint256 internal _delay; - - /// @notice The time that a call was initiated. - mapping(bytes32 => uint256) internal _queuedAt; - - /// @notice A modifier that reverts if not called by the vetoer or by address(0) to allow - /// eth_call to interact with this proxy without needing to use low-level storage - /// inspection. We assume that nobody is able to trigger calls from address(0) during - /// normal EVM execution. 
- modifier readOrHandle() { - if (msg.sender == address(0)) { - _; - } else { - // This WILL halt the call frame on completion. - _handleCall(); - } - } - - /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.2 - string public constant version = "1.0.1-beta.2"; - - /// @notice Sets the target admin during contract deployment. - /// @param _vetoer Address of the vetoer. - /// @param _initiator Address of the initiator. - /// @param _target Address of the target. - /// @param _operatingDelay Time to delay when the system is operational. - constructor(address _vetoer, address _initiator, address _target, uint256 _operatingDelay) { - // Note that the _delay value is not set here. Having an initial delay of 0 is helpful - // during the deployment of a new system. - VETOER = _vetoer; - INITIATOR = _initiator; - TARGET = _target; - OPERATING_DELAY = _operatingDelay; - } - - /// @notice Gets the initiator - /// @return initiator_ Initiator address. - function initiator() external virtual readOrHandle returns (address initiator_) { - initiator_ = INITIATOR; - } - - //// @notice Queries the vetoer address. - /// @return vetoer_ Vetoer address. - function vetoer() external virtual readOrHandle returns (address vetoer_) { - vetoer_ = VETOER; - } - - //// @notice Queries the target address. - /// @return target_ Target address. - function target() external readOrHandle returns (address target_) { - target_ = TARGET; - } - - /// @notice Gets the delay - /// @return delay_ Delay address. - function delay() external readOrHandle returns (uint256 delay_) { - delay_ = _delay; - } - - /// @notice Gets entries in the _queuedAt mapping. - /// @param _callHash The hash of the call data. - /// @return queuedAt_ The time the callHash was recorded. - function queuedAt(bytes32 _callHash) external readOrHandle returns (uint256 queuedAt_) { - queuedAt_ = _queuedAt[_callHash]; - } - - /// @notice Used for all calls that pass data to the contract. 
- fallback() external { - _handleCall(); - } - - /// @notice Receives all calls other than those made by the vetoer. - /// This enables transparent initiation and forwarding of calls to the target and avoids - /// the need for additional layers of abi encoding. - function _handleCall() internal { - // The initiator and vetoer activate the delay by passing in null data. - if (msg.data.length == 0 && _delay == 0) { - if (msg.sender != INITIATOR && msg.sender != VETOER) { - revert Unauthorized(INITIATOR, msg.sender); - } - _delay = OPERATING_DELAY; - emit DelayActivated(_delay); - return; - } - - bytes32 callHash = keccak256(msg.data); - - // Case 1: The initiator is calling the contract to initiate a call. - if (msg.sender == INITIATOR && _queuedAt[callHash] == 0) { - if (_delay == 0) { - // This forward function will halt the call frame on completion. - _forwardAndHalt(callHash); - } - _queuedAt[callHash] = block.timestamp; - emit Initiated(callHash, msg.data); - return; - } - - // Case 2: The vetoer is calling the contract to veto a call. - // Note: The vetoer retains the ability to veto even after the delay has passed. This makes censoring the vetoer - // more costly, as there is no time limit after which their transaction can be included. - if (msg.sender == VETOER && _queuedAt[callHash] != 0) { - delete _queuedAt[callHash]; - emit Vetoed(callHash, msg.data); - return; - } - - // Case 3: The call is from an unpermissioned actor. We'll forward the call if the delay has - // passed. - if (_queuedAt[callHash] == 0) { - // The call has not been initiated, so we'll treat this is an unauthorized initiation attempt. - revert Unauthorized(INITIATOR, msg.sender); - } - - if (_queuedAt[callHash] + _delay > block.timestamp) { - // Not enough time has passed, so we'll revert. 
- revert ForwardingEarly(); - } - - // Delete the call to prevent replays - delete _queuedAt[callHash]; - _forwardAndHalt(callHash); - } - - /// @notice Forwards the call to the target and halts the call frame. - function _forwardAndHalt(bytes32 _callHash) internal { - // Forward the call - emit Forwarded(_callHash, msg.data); - (bool success, bytes memory returndata) = TARGET.call(msg.data); - if (success == true) { - assembly { - return(add(returndata, 0x20), mload(returndata)) - } - } else { - assembly { - revert(add(returndata, 0x20), mload(returndata)) - } - } - } -} diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 4bf52ff228a..a2a5a5f215b 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -4,15 +4,12 @@ pragma solidity 0.8.15; import { Blueprint } from "src/libraries/Blueprint.sol"; import { Constants } from "src/libraries/Constants.sol"; -import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol"; - import { ISemver } from "src/universal/interfaces/ISemver.sol"; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; import { IDelayedWETH } from "src/dispute/interfaces/IDelayedWETH.sol"; import { IAnchorStateRegistry } from "src/dispute/interfaces/IAnchorStateRegistry.sol"; import { IDisputeGame } from "src/dispute/interfaces/IDisputeGame.sol"; -import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; import { IAddressManager } from "src/legacy/interfaces/IAddressManager.sol"; import { IProxyAdmin } from "src/universal/interfaces/IProxyAdmin.sol"; @@ -28,14 +25,12 @@ import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; import { IOptimismPortal2 } from 
"src/L1/interfaces/IOptimismPortal2.sol"; import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; -import { ISystemConfigV160 } from "src/L1/interfaces/ISystemConfigV160.sol"; import { IL1CrossDomainMessenger } from "src/L1/interfaces/IL1CrossDomainMessenger.sol"; import { IL1ERC721Bridge } from "src/L1/interfaces/IL1ERC721Bridge.sol"; import { IL1StandardBridge } from "src/L1/interfaces/IL1StandardBridge.sol"; import { IOptimismMintableERC20Factory } from "src/universal/interfaces/IOptimismMintableERC20Factory.sol"; -/// @custom:proxied true -contract OPContractsManager is ISemver, Initializable { +contract OPContractsManager is ISemver { // -------- Structs -------- /// @notice Represents the roles that can be set when deploying a standard OP Stack chain. @@ -89,19 +84,6 @@ contract OPContractsManager is ISemver, Initializable { IDelayedWETH delayedWETHPermissionlessGameProxy; } - /// @notice The logic address and initializer selector for an implementation contract. - struct Implementation { - address logic; // Address containing the deployed logic contract. - bytes4 initializer; // Function selector for the initializer. - } - - /// @notice Used to set the implementation for a contract by mapping a contract - /// name to the implementation data. - struct ImplementationSetter { - string name; // Contract name. - Implementation info; // Implementation to set. - } - /// @notice Addresses of ERC-5202 Blueprint contracts. There are used for deploying full size /// contracts, to reduce the code size of this factory contract. If it deployed full contracts /// using the `new Proxy()` syntax, the code size would get large fast, since this contract would @@ -118,19 +100,23 @@ contract OPContractsManager is ISemver, Initializable { address permissionedDisputeGame2; } - /// @notice Inputs required when initializing the OPContractsManager. 
To avoid 'StackTooDeep' errors, - /// all necessary inputs (excluding immutables) for initialization are bundled together in this struct. - struct InitializerInputs { - Blueprints blueprints; - ImplementationSetter[] setters; - string release; - bool isLatest; + /// @notice The latest implementation contracts for the OP Stack. + struct Implementations { + address l1ERC721BridgeImpl; + address optimismPortalImpl; + address systemConfigImpl; + address optimismMintableERC20FactoryImpl; + address l1CrossDomainMessengerImpl; + address l1StandardBridgeImpl; + address disputeGameFactoryImpl; + address delayedWETHImpl; + address mipsImpl; } // -------- Constants and Variables -------- - /// @custom:semver 1.0.0-beta.20 - string public constant version = "1.0.0-beta.20"; + /// @custom:semver 1.0.0-beta.21 + string public constant version = "1.0.0-beta.21"; /// @notice Represents the interface version so consumers know how to decode the DeployOutput struct /// that's emitted in the `Deployed` event. Whenever that struct changes, a new version should be used. @@ -142,24 +128,20 @@ contract OPContractsManager is ISemver, Initializable { /// @notice Address of the ProtocolVersions contract shared by all chains. IProtocolVersions public immutable protocolVersions; - /// @notice The latest release of the OP Contracts Manager, as a string of the format `op-contracts/vX.Y.Z`. - string public latestRelease; - - /// @notice Maps a release version to a contract name to it's implementation data. - mapping(string => mapping(string => Implementation)) public implementations; + // @notice L1 smart contracts release deployed by this version of OPCM. This is used in opcm to signal which version + // of the L1 smart contracts is deployed. It takes the format of `op-contracts/vX.Y.Z`. + string public l1ContractsRelease; /// @notice Maps an L2 Chain ID to the SystemConfig for that chain. mapping(uint256 => ISystemConfig) public systemConfigs; /// @notice Addresses of the Blueprint contracts. 
/// This is internal because if public the autogenerated getter method would return a tuple of - /// addresses, but we want it to return a struct. This is also set via `initialize` because - /// we can't make this an immutable variable as it is a non-value type. + /// addresses, but we want it to return a struct. Blueprints internal blueprint; - /// @notice Storage gap for future modifications, so we can expand the number of blueprints - /// without affecting other storage variables. - uint256[50] private __gap; + /// @notice Addresses of the latest implementation contracts. + Implementations internal implementation; // -------- Events -------- @@ -197,37 +179,26 @@ contract OPContractsManager is ISemver, Initializable { // -------- Methods -------- - /// @notice OPCM is proxied. Therefore the `initialize` function replaces most constructor logic for this contract. - - constructor(ISuperchainConfig _superchainConfig, IProtocolVersions _protocolVersions) { + constructor( + ISuperchainConfig _superchainConfig, + IProtocolVersions _protocolVersions, + string memory _l1ContractsRelease, + Blueprints memory _blueprints, + Implementations memory _implementations + ) { assertValidContractAddress(address(_superchainConfig)); assertValidContractAddress(address(_protocolVersions)); superchainConfig = _superchainConfig; protocolVersions = _protocolVersions; - _disableInitializers(); - } - - function initialize(InitializerInputs memory _initializerInputs) public initializer { - if (_initializerInputs.isLatest) latestRelease = _initializerInputs.release; - if (keccak256(bytes(latestRelease)) == keccak256("")) revert LatestReleaseNotSet(); + l1ContractsRelease = _l1ContractsRelease; - for (uint256 i = 0; i < _initializerInputs.setters.length; i++) { - ImplementationSetter memory setter = _initializerInputs.setters[i]; - Implementation storage impl = implementations[_initializerInputs.release][setter.name]; - if (impl.logic != address(0)) revert AlreadyReleased(); - - 
impl.initializer = setter.info.initializer; - impl.logic = setter.info.logic; - } - - blueprint = _initializerInputs.blueprints; + blueprint = _blueprints; + implementation = _implementations; } function deploy(DeployInput calldata _input) external returns (DeployOutput memory) { assertValidInputs(_input); - uint256 l2ChainId = _input.l2ChainId; - // The salt for a non-proxy contract is a function of the chain ID and the salt mixer. string memory saltMixer = _input.saltMixer; bytes32 salt = keccak256(abi.encode(l2ChainId, saltMixer)); @@ -266,7 +237,6 @@ contract OPContractsManager is ISemver, Initializable { payable(Blueprint.deployFrom(blueprint.l1ChugSplashProxy, salt, abi.encode(output.opChainProxyAdmin))) ); output.opChainProxyAdmin.setProxyType(address(output.l1StandardBridgeProxy), IProxyAdmin.ProxyType.CHUGSPLASH); - string memory contractName = "OVM_L1CrossDomainMessenger"; output.l1CrossDomainMessengerProxy = IL1CrossDomainMessenger( Blueprint.deployFrom(blueprint.resolvedDelegateProxy, salt, abi.encode(output.addressManager, contractName)) @@ -275,10 +245,8 @@ contract OPContractsManager is ISemver, Initializable { address(output.l1CrossDomainMessengerProxy), IProxyAdmin.ProxyType.RESOLVED ); output.opChainProxyAdmin.setImplementationName(address(output.l1CrossDomainMessengerProxy), contractName); - // Now that all proxies are deployed, we can transfer ownership of the AddressManager to the ProxyAdmin. output.addressManager.transferOwnership(address(output.opChainProxyAdmin)); - // The AnchorStateRegistry Implementation is not MCP Ready, and therefore requires an implementation per chain. // It must be deployed after the DisputeGameFactoryProxy so that it can be provided as a constructor argument. 
output.anchorStateRegistryImpl = IAnchorStateRegistry( @@ -301,54 +269,76 @@ contract OPContractsManager is ISemver, Initializable { ); // -------- Set and Initialize Proxy Implementations -------- - Implementation memory impl; bytes memory data; - impl = getLatestImplementation("L1ERC721Bridge"); - data = encodeL1ERC721BridgeInitializer(impl.initializer, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.l1ERC721BridgeProxy), impl.logic, data); + data = encodeL1ERC721BridgeInitializer(IL1ERC721Bridge.initialize.selector, output); + upgradeAndCall( + output.opChainProxyAdmin, address(output.l1ERC721BridgeProxy), implementation.l1ERC721BridgeImpl, data + ); - impl = getLatestImplementation("OptimismPortal"); - data = encodeOptimismPortalInitializer(impl.initializer, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.optimismPortalProxy), impl.logic, data); + data = encodeOptimismPortalInitializer(IOptimismPortal2.initialize.selector, output); + upgradeAndCall( + output.opChainProxyAdmin, address(output.optimismPortalProxy), implementation.optimismPortalImpl, data + ); // First we upgrade the implementation so it's version can be retrieved, then we initialize // it afterwards. See the comments in encodeSystemConfigInitializer to learn more. 
- impl = getLatestImplementation("SystemConfig"); - output.opChainProxyAdmin.upgrade(payable(address(output.systemConfigProxy)), impl.logic); - data = encodeSystemConfigInitializer(impl.initializer, _input, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.systemConfigProxy), impl.logic, data); + output.opChainProxyAdmin.upgrade(payable(address(output.systemConfigProxy)), implementation.systemConfigImpl); + data = encodeSystemConfigInitializer(_input, output); + upgradeAndCall( + output.opChainProxyAdmin, address(output.systemConfigProxy), implementation.systemConfigImpl, data + ); - impl = getLatestImplementation("OptimismMintableERC20Factory"); - data = encodeOptimismMintableERC20FactoryInitializer(impl.initializer, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.optimismMintableERC20FactoryProxy), impl.logic, data); + data = encodeOptimismMintableERC20FactoryInitializer(IOptimismMintableERC20Factory.initialize.selector, output); + upgradeAndCall( + output.opChainProxyAdmin, + address(output.optimismMintableERC20FactoryProxy), + implementation.optimismMintableERC20FactoryImpl, + data + ); - impl = getLatestImplementation("L1CrossDomainMessenger"); - data = encodeL1CrossDomainMessengerInitializer(impl.initializer, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.l1CrossDomainMessengerProxy), impl.logic, data); + data = encodeL1CrossDomainMessengerInitializer(IL1CrossDomainMessenger.initialize.selector, output); + upgradeAndCall( + output.opChainProxyAdmin, + address(output.l1CrossDomainMessengerProxy), + implementation.l1CrossDomainMessengerImpl, + data + ); - impl = getLatestImplementation("L1StandardBridge"); - data = encodeL1StandardBridgeInitializer(impl.initializer, output); - upgradeAndCall(output.opChainProxyAdmin, address(output.l1StandardBridgeProxy), impl.logic, data); + data = encodeL1StandardBridgeInitializer(IL1StandardBridge.initialize.selector, output); + upgradeAndCall( + 
output.opChainProxyAdmin, address(output.l1StandardBridgeProxy), implementation.l1StandardBridgeImpl, data + ); - impl = getLatestImplementation("DelayedWETH"); - data = encodeDelayedWETHInitializer(impl.initializer, _input); + data = encodeDelayedWETHInitializer(IDelayedWETH.initialize.selector, _input); // Eventually we will switch from DelayedWETHPermissionedGameProxy to DelayedWETHPermissionlessGameProxy. - upgradeAndCall(output.opChainProxyAdmin, address(output.delayedWETHPermissionedGameProxy), impl.logic, data); + upgradeAndCall( + output.opChainProxyAdmin, + address(output.delayedWETHPermissionedGameProxy), + implementation.delayedWETHImpl, + data + ); // We set the initial owner to this contract, set game implementations, then transfer ownership. - impl = getLatestImplementation("DisputeGameFactory"); - data = encodeDisputeGameFactoryInitializer(impl.initializer, _input); - upgradeAndCall(output.opChainProxyAdmin, address(output.disputeGameFactoryProxy), impl.logic, data); + data = encodeDisputeGameFactoryInitializer(IDisputeGameFactory.initialize.selector, _input); + upgradeAndCall( + output.opChainProxyAdmin, + address(output.disputeGameFactoryProxy), + implementation.disputeGameFactoryImpl, + data + ); output.disputeGameFactoryProxy.setImplementation( GameTypes.PERMISSIONED_CANNON, IDisputeGame(address(output.permissionedDisputeGame)) ); output.disputeGameFactoryProxy.transferOwnership(address(_input.roles.opChainProxyAdminOwner)); - impl.logic = address(output.anchorStateRegistryImpl); - impl.initializer = IAnchorStateRegistry.initialize.selector; - data = encodeAnchorStateRegistryInitializer(impl.initializer, _input); - upgradeAndCall(output.opChainProxyAdmin, address(output.anchorStateRegistryProxy), impl.logic, data); + data = encodeAnchorStateRegistryInitializer(IAnchorStateRegistry.initialize.selector, _input); + upgradeAndCall( + output.opChainProxyAdmin, + address(output.anchorStateRegistryProxy), + address(output.anchorStateRegistryImpl), + 
data + ); // -------- Finalize Deployment -------- // Transfer ownership of the ProxyAdmin from this contract to the specified owner. @@ -402,13 +392,6 @@ contract OPContractsManager is ISemver, Initializable { return Blueprint.deployFrom(blueprint.proxy, salt, abi.encode(_proxyAdmin)); } - /// @notice Returns the implementation data for a contract name. Makes a copy of the internal - // Implementation struct in storage to prevent accidental mutation of the internal data. - function getLatestImplementation(string memory _name) internal view returns (Implementation memory) { - Implementation storage impl = implementations[latestRelease][_name]; - return Implementation({ logic: impl.logic, initializer: impl.initializer }); - } - // -------- Initializer Encoding -------- /// @notice Helper method for encoding the L1ERC721Bridge initializer data. @@ -445,7 +428,6 @@ contract OPContractsManager is ISemver, Initializable { /// @notice Helper method for encoding the SystemConfig initializer data. function encodeSystemConfigInitializer( - bytes4 _selector, DeployInput memory _input, DeployOutput memory _output ) @@ -454,50 +436,22 @@ contract OPContractsManager is ISemver, Initializable { virtual returns (bytes memory) { - // We inspect the SystemConfig contract and determine it's signature here. This is required - // because this OPCM contract is being developed in a repository that no longer contains the - // SystemConfig contract that was released as part of `op-contracts/v1.6.0`, but in production - // it needs to support that version, in addition to the version currently on develop. - string memory semver = _output.systemConfigProxy.version(); - if (keccak256(abi.encode(semver)) == keccak256(abi.encode(string("2.2.0")))) { - // We are using the op-contracts/v1.6.0 SystemConfig contract. 
- ( - IResourceMetering.ResourceConfig memory referenceResourceConfig, - ISystemConfigV160.Addresses memory opChainAddrs - ) = defaultSystemConfigV160Params(_selector, _input, _output); - - return abi.encodeWithSelector( - _selector, - _input.roles.systemConfigOwner, - _input.basefeeScalar, - _input.blobBasefeeScalar, - bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - _input.gasLimit, - _input.roles.unsafeBlockSigner, - referenceResourceConfig, - chainIdToBatchInboxAddress(_input.l2ChainId), - opChainAddrs - ); - } else { - // We are using the latest SystemConfig contract from the repo. - ( - IResourceMetering.ResourceConfig memory referenceResourceConfig, - ISystemConfig.Addresses memory opChainAddrs - ) = defaultSystemConfigParams(_selector, _input, _output); - - return abi.encodeWithSelector( - _selector, - _input.roles.systemConfigOwner, - _input.basefeeScalar, - _input.blobBasefeeScalar, - bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash - _input.gasLimit, - _input.roles.unsafeBlockSigner, - referenceResourceConfig, - chainIdToBatchInboxAddress(_input.l2ChainId), - opChainAddrs - ); - } + bytes4 selector = ISystemConfig.initialize.selector; + (IResourceMetering.ResourceConfig memory referenceResourceConfig, ISystemConfig.Addresses memory opChainAddrs) = + defaultSystemConfigParams(selector, _input, _output); + + return abi.encodeWithSelector( + selector, + _input.roles.systemConfigOwner, + _input.basefeeScalar, + _input.blobBasefeeScalar, + bytes32(uint256(uint160(_input.roles.batcher))), // batcherHash + _input.gasLimit, + _input.roles.unsafeBlockSigner, + referenceResourceConfig, + chainIdToBatchInboxAddress(_input.l2ChainId), + opChainAddrs + ); } /// @notice Helper method for encoding the OptimismMintableERC20Factory initializer data. 
@@ -599,7 +553,7 @@ contract OPContractsManager is ISemver, Initializable { _input.disputeSplitDepth, _input.disputeClockExtension, _input.disputeMaxClockDuration, - IBigStepper(getLatestImplementation("MIPS").logic), + IBigStepper(implementation.mipsImpl), IDelayedWETH(payable(address(_output.delayedWETHPermissionedGameProxy))), IAnchorStateRegistry(address(_output.anchorStateRegistryProxy)), _input.l2ChainId, @@ -645,45 +599,6 @@ contract OPContractsManager is ISemver, Initializable { assertValidContractAddress(opChainAddrs_.optimismMintableERC20Factory); } - /// @notice Returns default, standard config arguments for the SystemConfig initializer. - /// This is used by subclasses to reduce code duplication. - function defaultSystemConfigV160Params( - bytes4, /* selector */ - DeployInput memory, /* _input */ - DeployOutput memory _output - ) - internal - view - virtual - returns ( - IResourceMetering.ResourceConfig memory resourceConfig_, - ISystemConfigV160.Addresses memory opChainAddrs_ - ) - { - // We use assembly to easily convert from IResourceMetering.ResourceConfig to ResourceMetering.ResourceConfig. - // This is required because we have not yet fully migrated the codebase to be interface-based. 
- IResourceMetering.ResourceConfig memory resourceConfig = Constants.DEFAULT_RESOURCE_CONFIG(); - assembly ("memory-safe") { - resourceConfig_ := resourceConfig - } - - opChainAddrs_ = ISystemConfigV160.Addresses({ - l1CrossDomainMessenger: address(_output.l1CrossDomainMessengerProxy), - l1ERC721Bridge: address(_output.l1ERC721BridgeProxy), - l1StandardBridge: address(_output.l1StandardBridgeProxy), - disputeGameFactory: address(_output.disputeGameFactoryProxy), - optimismPortal: address(_output.optimismPortalProxy), - optimismMintableERC20Factory: address(_output.optimismMintableERC20FactoryProxy) - }); - - assertValidContractAddress(opChainAddrs_.l1CrossDomainMessenger); - assertValidContractAddress(opChainAddrs_.l1ERC721Bridge); - assertValidContractAddress(opChainAddrs_.l1StandardBridge); - assertValidContractAddress(opChainAddrs_.disputeGameFactory); - assertValidContractAddress(opChainAddrs_.optimismPortal); - assertValidContractAddress(opChainAddrs_.optimismMintableERC20Factory); - } - /// @notice Makes an external call to the target to initialize the proxy with the specified data. /// First performs safety checks to ensure the target, implementation, and proxy admin are valid. function upgradeAndCall( @@ -710,4 +625,9 @@ contract OPContractsManager is ISemver, Initializable { function blueprints() public view returns (Blueprints memory) { return blueprint; } + + /// @notice Returns the implementation contract addresses. 
+ function implementations() public view returns (Implementations memory) { + return implementation; + } } diff --git a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol index 19de5537b41..79e0efda973 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManagerInterop.sol @@ -6,20 +6,22 @@ import { ISuperchainConfig } from "src/L1/interfaces/ISuperchainConfig.sol"; import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; -/// @custom:proxied true contract OPContractsManagerInterop is OPContractsManager { constructor( ISuperchainConfig _superchainConfig, - IProtocolVersions _protocolVersions + IProtocolVersions _protocolVersions, + string memory _l1ContractsRelease, + Blueprints memory _blueprints, + Implementations memory _implementations ) - OPContractsManager(_superchainConfig, _protocolVersions) + OPContractsManager(_superchainConfig, _protocolVersions, _l1ContractsRelease, _blueprints, _implementations) { } // The `SystemConfigInterop` contract has an extra `address _dependencyManager` argument // that we must account for. 
function encodeSystemConfigInitializer( - bytes4 _selector, DeployInput memory _input, DeployOutput memory _output ) @@ -29,8 +31,9 @@ contract OPContractsManagerInterop is OPContractsManager { override returns (bytes memory) { + bytes4 selector = ISystemConfigInterop.initialize.selector; (IResourceMetering.ResourceConfig memory referenceResourceConfig, ISystemConfig.Addresses memory opChainAddrs) = - defaultSystemConfigParams(_selector, _input, _output); + defaultSystemConfigParams(selector, _input, _output); // TODO For now we assume that the dependency manager is the same as system config owner. // This is currently undefined since it's not part of the standard config, so we may need @@ -40,7 +43,7 @@ contract OPContractsManagerInterop is OPContractsManager { address dependencyManager = address(_input.roles.systemConfigOwner); return abi.encodeWithSelector( - _selector, + selector, _input.roles.systemConfigOwner, _input.basefeeScalar, _input.blobBasefeeScalar, diff --git a/packages/contracts-bedrock/src/L1/SystemConfig.sol b/packages/contracts-bedrock/src/L1/SystemConfig.sol index afb9525403c..51739116443 100644 --- a/packages/contracts-bedrock/src/L1/SystemConfig.sol +++ b/packages/contracts-bedrock/src/L1/SystemConfig.sol @@ -137,9 +137,9 @@ contract SystemConfig is OwnableUpgradeable, ISemver, IGasToken { event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); /// @notice Semantic version. - /// @custom:semver 2.3.0-beta.5 + /// @custom:semver 2.3.0-beta.6 function version() public pure virtual returns (string memory) { - return "2.3.0-beta.5"; + return "2.3.0-beta.6"; } /// @notice Constructs the SystemConfig contract. 
Cannot set @@ -224,7 +224,6 @@ contract SystemConfig is OwnableUpgradeable, ISemver, IGasToken { _setGasPayingToken(_addresses.gasPayingToken); _setResourceConfig(_config); - require(_gasLimit >= minimumGasLimit(), "SystemConfig: gas limit too low"); } /// @notice Returns the minimum L2 gas limit that can be safely set for the system to diff --git a/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol b/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol index 03210928659..9e9503fe623 100644 --- a/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol +++ b/packages/contracts-bedrock/src/L1/SystemConfigInterop.sol @@ -68,9 +68,9 @@ contract SystemConfigInterop is SystemConfig { Storage.setAddress(DEPENDENCY_MANAGER_SLOT, _dependencyManager); } - /// @custom:semver +interop-beta.3 + /// @custom:semver +interop-beta.4 function version() public pure override returns (string memory) { - return string.concat(super.version(), "+interop-beta.3"); + return string.concat(super.version(), "+interop-beta.4"); } /// @notice Internal setter for the gas paying token address, includes validation. 
diff --git a/packages/contracts-bedrock/src/L1/interfaces/IDelayedVetoable.sol b/packages/contracts-bedrock/src/L1/interfaces/IDelayedVetoable.sol deleted file mode 100644 index 53fd1681276..00000000000 --- a/packages/contracts-bedrock/src/L1/interfaces/IDelayedVetoable.sol +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -interface IDelayedVetoable { - error ForwardingEarly(); - error Unauthorized(address expected, address actual); - - event DelayActivated(uint256 delay); - event Forwarded(bytes32 indexed callHash, bytes data); - event Initiated(bytes32 indexed callHash, bytes data); - event Vetoed(bytes32 indexed callHash, bytes data); - - fallback() external; - - function delay() external returns (uint256 delay_); - function initiator() external returns (address initiator_); - function queuedAt(bytes32 _callHash) external returns (uint256 queuedAt_); - function target() external returns (address target_); - function version() external view returns (string memory); - function vetoer() external returns (address vetoer_); - - function __constructor__(address _vetoer, address _initiator, address _target, uint256 _operatingDelay) external; -} diff --git a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol b/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol deleted file mode 100644 index 210b0ddf8e5..00000000000 --- a/packages/contracts-bedrock/src/L1/interfaces/ISystemConfigV160.sol +++ /dev/null @@ -1,85 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -import { IResourceMetering } from "src/L1/interfaces/IResourceMetering.sol"; - -/// @notice This interface corresponds to the op-contracts/v1.6.0 release of the SystemConfig -/// contract, which has a semver of 2.2.0 as specified in -/// https://github.com/ethereum-optimism/optimism/releases/tag/op-contracts%2Fv1.6.0 -interface ISystemConfigV160 { - enum UpdateType { - BATCHER, - FEE_SCALARS, - GAS_LIMIT, - 
UNSAFE_BLOCK_SIGNER - } - - struct Addresses { - address l1CrossDomainMessenger; - address l1ERC721Bridge; - address l1StandardBridge; - address disputeGameFactory; - address optimismPortal; - address optimismMintableERC20Factory; - } - - event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); - event Initialized(uint8 version); - event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); - - function BATCH_INBOX_SLOT() external view returns (bytes32); - function DISPUTE_GAME_FACTORY_SLOT() external view returns (bytes32); - function L1_CROSS_DOMAIN_MESSENGER_SLOT() external view returns (bytes32); - function L1_ERC_721_BRIDGE_SLOT() external view returns (bytes32); - function L1_STANDARD_BRIDGE_SLOT() external view returns (bytes32); - function OPTIMISM_MINTABLE_ERC20_FACTORY_SLOT() external view returns (bytes32); - function OPTIMISM_PORTAL_SLOT() external view returns (bytes32); - function START_BLOCK_SLOT() external view returns (bytes32); - function UNSAFE_BLOCK_SIGNER_SLOT() external view returns (bytes32); - function VERSION() external view returns (uint256); - function basefeeScalar() external view returns (uint32); - function batchInbox() external view returns (address addr_); - function batcherHash() external view returns (bytes32); - function blobbasefeeScalar() external view returns (uint32); - function disputeGameFactory() external view returns (address addr_); - function gasLimit() external view returns (uint64); - function gasPayingToken() external view returns (address addr_, uint8 decimals_); - function gasPayingTokenName() external view returns (string memory name_); - function gasPayingTokenSymbol() external view returns (string memory symbol_); - function initialize( - address _owner, - uint256 _basefeeScalar, - uint256 _blobbasefeeScalar, - bytes32 _batcherHash, - uint64 _gasLimit, - address _unsafeBlockSigner, - IResourceMetering.ResourceConfig memory _config, - address _batchInbox, - 
Addresses memory _addresses - ) - external; - function isCustomGasToken() external view returns (bool); - function l1CrossDomainMessenger() external view returns (address addr_); - function l1ERC721Bridge() external view returns (address addr_); - function l1StandardBridge() external view returns (address addr_); - function maximumGasLimit() external pure returns (uint64); - function minimumGasLimit() external view returns (uint64); - function optimismMintableERC20Factory() external view returns (address addr_); - function optimismPortal() external view returns (address addr_); - function overhead() external view returns (uint256); - function owner() external view returns (address); - function renounceOwnership() external; - function resourceConfig() external view returns (IResourceMetering.ResourceConfig memory); - function scalar() external view returns (uint256); - function setBatcherHash(bytes32 _batcherHash) external; - function setGasConfig(uint256 _overhead, uint256 _scalar) external; - function setGasConfigEcotone(uint32 _basefeeScalar, uint32 _blobbasefeeScalar) external; - function setGasLimit(uint64 _gasLimit) external; - function setUnsafeBlockSigner(address _unsafeBlockSigner) external; - function startBlock() external view returns (uint256 startBlock_); - function transferOwnership(address newOwner) external; // nosemgrep - function unsafeBlockSigner() external view returns (address addr_); - function version() external pure returns (string memory); - - function __constructor__() external; -} diff --git a/packages/contracts-bedrock/src/asterisc/RISCV.sol b/packages/contracts-bedrock/src/asterisc/RISCV.sol new file mode 100644 index 00000000000..8b0bbb5d1d7 --- /dev/null +++ b/packages/contracts-bedrock/src/asterisc/RISCV.sol @@ -0,0 +1,1652 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +import { IPreimageOracle } from "src/cannon/interfaces/IPreimageOracle.sol"; +import { IBigStepper } from "src/dispute/interfaces/IBigStepper.sol"; + 
+/// @title RISCV +/// @notice The RISCV contract emulates a single RISCV hart cycle statelessly, using memory proofs to verify the +/// instruction and optional memory access' inclusion in the memory merkle root provided in the trusted +/// prestate witness. +/// This contract has been vendorized from the Asterisc project. The original source code can be found at +/// @dev https://github.com/ethereum-optimism/asterisc +contract RISCV is IBigStepper { + /// @notice The preimage oracle contract. + IPreimageOracle public oracle; + + /// @notice The version of the contract. + /// @custom:semver 1.1.0-rc.2 + string public constant version = "1.1.0-rc.2"; + + /// @param _oracle The preimage oracle contract. + constructor(IPreimageOracle _oracle) { + oracle = _oracle; + } + + /// @inheritdoc IBigStepper + function step(bytes calldata _stateData, bytes calldata _proof, bytes32 _localContext) public returns (bytes32) { + assembly { + function revertWithCode(code) { + mstore(0, code) + revert(0, 0x20) + } + + function preimageOraclePos() -> out { + // slot of preimageOraclePos field + out := 0 + } + + // + // Yul64 - functions to do 64 bit math - see yul64.go + // + function u64Mask() -> out { + // max uint64 + out := shr(192, not(0)) // 256-64 = 192 + } + + function u32Mask() -> out { + out := U64(shr(toU256(224), not(0))) // 256-32 = 224 + } + + function toU64(v) -> out { + out := v + } + + function shortToU64(v) -> out { + out := v + } + + function shortToU256(v) -> out { + out := v + } + + function longToU256(v) -> out { + out := v + } + + function u256ToU64(v) -> out { + out := and(v, U256(u64Mask())) + } + + function u64ToU256(v) -> out { + out := v + } + + function mask32Signed64(v) -> out { + out := signExtend64(and64(v, u32Mask()), toU64(31)) + } + + function u64Mod() -> out { + // 1 << 64 + out := shl(toU256(64), toU256(1)) + } + + function u64TopBit() -> out { + // 1 << 63 + out := shl(toU256(63), toU256(1)) + } + + function signExtend64(v, bit) -> out { + switch 
and(v, shl(bit, 1)) + case 0 { + // fill with zeroes, by masking + out := U64(and(U256(v), shr(sub(toU256(63), bit), U256(u64Mask())))) + } + default { + // fill with ones, by or-ing + out := U64(or(U256(v), shl(bit, shr(bit, U256(u64Mask()))))) + } + } + + function signExtend64To256(v) -> out { + switch and(U256(v), u64TopBit()) + case 0 { out := v } + default { out := or(shl(toU256(64), not(0)), v) } + } + + function add64(x, y) -> out { + out := U64(mod(add(U256(x), U256(y)), u64Mod())) + } + + function sub64(x, y) -> out { + out := U64(mod(sub(U256(x), U256(y)), u64Mod())) + } + + function mul64(x, y) -> out { + out := u256ToU64(mul(U256(x), U256(y))) + } + + function div64(x, y) -> out { + out := u256ToU64(div(U256(x), U256(y))) + } + + function sdiv64(x, y) -> out { + // note: signed overflow semantics are the same between Go and EVM assembly + out := u256ToU64(sdiv(signExtend64To256(x), signExtend64To256(y))) + } + + function mod64(x, y) -> out { + out := U64(mod(U256(x), U256(y))) + } + + function smod64(x, y) -> out { + out := u256ToU64(smod(signExtend64To256(x), signExtend64To256(y))) + } + + function not64(x) -> out { + out := u256ToU64(not(U256(x))) + } + + function lt64(x, y) -> out { + out := U64(lt(U256(x), U256(y))) + } + + function gt64(x, y) -> out { + out := U64(gt(U256(x), U256(y))) + } + + function slt64(x, y) -> out { + out := U64(slt(signExtend64To256(x), signExtend64To256(y))) + } + + function sgt64(x, y) -> out { + out := U64(sgt(signExtend64To256(x), signExtend64To256(y))) + } + + function eq64(x, y) -> out { + out := U64(eq(U256(x), U256(y))) + } + + function iszero64(x) -> out { + out := iszero(U256(x)) + } + + function and64(x, y) -> out { + out := U64(and(U256(x), U256(y))) + } + + function or64(x, y) -> out { + out := U64(or(U256(x), U256(y))) + } + + function xor64(x, y) -> out { + out := U64(xor(U256(x), U256(y))) + } + + function shl64(x, y) -> out { + out := u256ToU64(shl(U256(x), U256(y))) + } + + function shr64(x, y) -> out { + 
out := U64(shr(U256(x), U256(y))) + } + + function sar64(x, y) -> out { + out := u256ToU64(sar(U256(x), signExtend64To256(y))) + } + + // type casts, no-op in yul + function b32asBEWord(v) -> out { + out := v + } + function beWordAsB32(v) -> out { + out := v + } + function U64(v) -> out { + out := v + } + function U256(v) -> out { + out := v + } + function toU256(v) -> out { + out := v + } + + // + // Bit hacking util + // + function bitlen(x) -> n { + if gt(x, sub(shl(128, 1), 1)) { + x := shr(128, x) + n := add(n, 128) + } + if gt(x, sub(shl(64, 1), 1)) { + x := shr(64, x) + n := add(n, 64) + } + if gt(x, sub(shl(32, 1), 1)) { + x := shr(32, x) + n := add(n, 32) + } + if gt(x, sub(shl(16, 1), 1)) { + x := shr(16, x) + n := add(n, 16) + } + if gt(x, sub(shl(8, 1), 1)) { + x := shr(8, x) + n := add(n, 8) + } + if gt(x, sub(shl(4, 1), 1)) { + x := shr(4, x) + n := add(n, 4) + } + if gt(x, sub(shl(2, 1), 1)) { + x := shr(2, x) + n := add(n, 2) + } + if gt(x, sub(shl(1, 1), 1)) { + x := shr(1, x) + n := add(n, 1) + } + if gt(x, 0) { n := add(n, 1) } + } + + function endianSwap(x) -> out { + for { let i := 0 } lt(i, 32) { i := add(i, 1) } { + out := or(shl(8, out), and(x, 0xff)) + x := shr(8, x) + } + } + + // + // State layout + // + function stateSizeMemRoot() -> out { + out := 32 + } + function stateSizePreimageKey() -> out { + out := 32 + } + function stateSizePreimageOffset() -> out { + out := 8 + } + function stateSizePC() -> out { + out := 8 + } + function stateSizeExitCode() -> out { + out := 1 + } + function stateSizeExited() -> out { + out := 1 + } + function stateSizeStep() -> out { + out := 8 + } + function stateSizeHeap() -> out { + out := 8 + } + function stateSizeLoadReservation() -> out { + out := 8 + } + function stateSizeRegisters() -> out { + out := mul(8, 32) + } + + function stateOffsetMemRoot() -> out { + out := 0 + } + function stateOffsetPreimageKey() -> out { + out := add(stateOffsetMemRoot(), stateSizeMemRoot()) + } + function 
stateOffsetPreimageOffset() -> out { + out := add(stateOffsetPreimageKey(), stateSizePreimageKey()) + } + function stateOffsetPC() -> out { + out := add(stateOffsetPreimageOffset(), stateSizePreimageOffset()) + } + function stateOffsetExitCode() -> out { + out := add(stateOffsetPC(), stateSizePC()) + } + function stateOffsetExited() -> out { + out := add(stateOffsetExitCode(), stateSizeExitCode()) + } + function stateOffsetStep() -> out { + out := add(stateOffsetExited(), stateSizeExited()) + } + function stateOffsetHeap() -> out { + out := add(stateOffsetStep(), stateSizeStep()) + } + function stateOffsetLoadReservation() -> out { + out := add(stateOffsetHeap(), stateSizeHeap()) + } + function stateOffsetRegisters() -> out { + out := add(stateOffsetLoadReservation(), stateSizeLoadReservation()) + } + function stateSize() -> out { + out := add(stateOffsetRegisters(), stateSizeRegisters()) + } + + // + // Initial EVM memory / calldata checks + // + if iszero(eq(mload(0x40), 0x80)) { + // expected memory check: no allocated memory (start after scratch + free-mem-ptr + zero slot = 0x80) + revert(0, 0) + } + if iszero(eq(_stateData.offset, 132)) { + // 32*4+4 = 132 expected state data offset + revert(0, 0) + } + if iszero(eq(calldataload(sub(_stateData.offset, 32)), stateSize())) { + // user-provided state size must match expected state size + revert(0, 0) + } + function paddedLen(v) -> out { + // padded to multiple of 32 bytes + let padding := mod(sub(32, mod(v, 32)), 32) + out := add(v, padding) + } + if iszero(eq(_proof.offset, add(add(_stateData.offset, paddedLen(stateSize())), 32))) { + // 132+stateSize+padding+32 = expected proof offset + revert(0, 0) + } + function proofContentOffset() -> out { + // since we can't reference proof.offset in functions, blame Yul + // 132+362+(32-362%32)+32=548 + out := 548 + } + if iszero(eq(_proof.offset, proofContentOffset())) { revert(0, 0) } + + // + // State loading + // + function memStateOffset() -> out { + out := 0x80 + } 
+ // copy the state calldata into memory, so we can mutate it + mstore(0x40, add(memStateOffset(), stateSize())) // alloc, update free mem pointer + calldatacopy(memStateOffset(), _stateData.offset, stateSize()) // same format in memory as in calldata + + // + // State access + // + function readState(offset, length) -> out { + out := mload(add(memStateOffset(), offset)) // note: the state variables are all big-endian encoded + out := shr(shl(3, sub(32, length)), out) // shift-right to right-align data and reduce to desired length + } + function writeState(offset, length, data) { + let memOffset := add(memStateOffset(), offset) + // left-aligned mask of length bytes + let mask := shl(shl(3, sub(32, length)), not(0)) + let prev := mload(memOffset) + // align data to left + data := shl(shl(3, sub(32, length)), data) + // mask out data from previous word, and apply new data + let result := or(and(prev, not(mask)), data) + mstore(memOffset, result) + } + + function getMemRoot() -> out { + out := readState(stateOffsetMemRoot(), stateSizeMemRoot()) + } + function setMemRoot(v) { + writeState(stateOffsetMemRoot(), stateSizeMemRoot(), v) + } + + function getPreimageKey() -> out { + out := readState(stateOffsetPreimageKey(), stateSizePreimageKey()) + } + function setPreimageKey(k) { + writeState(stateOffsetPreimageKey(), stateSizePreimageKey(), k) + } + + function getPreimageOffset() -> out { + out := readState(stateOffsetPreimageOffset(), stateSizePreimageOffset()) + } + function setPreimageOffset(v) { + writeState(stateOffsetPreimageOffset(), stateSizePreimageOffset(), v) + } + + function getPC() -> out { + out := readState(stateOffsetPC(), stateSizePC()) + } + function setPC(v) { + writeState(stateOffsetPC(), stateSizePC(), v) + } + + function getExited() -> out { + out := readState(stateOffsetExited(), stateSizeExited()) + } + function setExited() { + writeState(stateOffsetExited(), stateSizeExited(), 1) + } + + function getExitCode() -> out { + out := 
readState(stateOffsetExitCode(), stateSizeExitCode()) + } + function setExitCode(v) { + writeState(stateOffsetExitCode(), stateSizeExitCode(), v) + } + + function getStep() -> out { + out := readState(stateOffsetStep(), stateSizeStep()) + } + function setStep(v) { + writeState(stateOffsetStep(), stateSizeStep(), v) + } + + function getHeap() -> out { + out := readState(stateOffsetHeap(), stateSizeHeap()) + } + function setHeap(v) { + writeState(stateOffsetHeap(), stateSizeHeap(), v) + } + + function getLoadReservation() -> out { + out := readState(stateOffsetLoadReservation(), stateSizeLoadReservation()) + } + function setLoadReservation(addr) { + writeState(stateOffsetLoadReservation(), stateSizeLoadReservation(), addr) + } + + function getRegister(reg) -> out { + if gt64(reg, toU64(31)) { revertWithCode(0xbad4e9) } // cannot load invalid register + + let offset := add64(toU64(stateOffsetRegisters()), mul64(reg, toU64(8))) + out := readState(offset, 8) + } + function setRegister(reg, v) { + if iszero64(reg) { + // reg 0 must stay 0 + // v is a HINT, but no hints are specified by standard spec, or used by us. 
+ leave + } + if gt64(reg, toU64(31)) { revertWithCode(0xbad4e9) } // unknown register + + let offset := add64(toU64(stateOffsetRegisters()), mul64(reg, toU64(8))) + writeState(offset, 8, v) + } + + // + // State output + // + function vmStatus() -> status { + switch getExited() + case 1 { + switch getExitCode() + case 0 { status := 0 } + // VMStatusValid + case 1 { status := 1 } + // VMStatusInvalid + default { status := 2 } // VMStatusPanic + } + default { status := 3 } // VMStatusUnfinished + } + + function computeStateHash() -> out { + // Log the RISC-V state for debugging + log0(memStateOffset(), stateSize()) + + out := keccak256(memStateOffset(), stateSize()) + out := or(and(not(shl(248, 0xFF)), out), shl(248, vmStatus())) + } + + // + // Parse - functions to parse RISC-V instructions - see parse.go + // + function parseImmTypeI(instr) -> out { + out := signExtend64(shr64(toU64(20), instr), toU64(11)) + } + + function parseImmTypeS(instr) -> out { + out := + signExtend64( + or64(shl64(toU64(5), shr64(toU64(25), instr)), and64(shr64(toU64(7), instr), toU64(0x1F))), + toU64(11) + ) + } + + function parseImmTypeB(instr) -> out { + out := + signExtend64( + or64( + or64( + shl64(toU64(1), and64(shr64(toU64(8), instr), toU64(0xF))), + shl64(toU64(5), and64(shr64(toU64(25), instr), toU64(0x3F))) + ), + or64( + shl64(toU64(11), and64(shr64(toU64(7), instr), toU64(1))), + shl64(toU64(12), shr64(toU64(31), instr)) + ) + ), + toU64(12) + ) + } + + function parseImmTypeU(instr) -> out { + out := signExtend64(shr64(toU64(12), instr), toU64(19)) + } + + function parseImmTypeJ(instr) -> out { + out := + signExtend64( + or64( + or64( + and64(shr64(toU64(21), instr), shortToU64(0x3FF)), // 10 bits for index 0:9 + shl64(toU64(10), and64(shr64(toU64(20), instr), toU64(1))) // 1 bit for index 10 + ), + or64( + shl64(toU64(11), and64(shr64(toU64(12), instr), toU64(0xFF))), // 8 bits for index 11:18 + shl64(toU64(19), shr64(toU64(31), instr)) // 1 bit for index 19 + ) + ), + 
toU64(19) + ) + } + + function parseOpcode(instr) -> out { + out := and64(instr, toU64(0x7F)) + } + + function parseRd(instr) -> out { + out := and64(shr64(toU64(7), instr), toU64(0x1F)) + } + + function parseFunct3(instr) -> out { + out := and64(shr64(toU64(12), instr), toU64(0x7)) + } + + function parseRs1(instr) -> out { + out := and64(shr64(toU64(15), instr), toU64(0x1F)) + } + + function parseRs2(instr) -> out { + out := and64(shr64(toU64(20), instr), toU64(0x1F)) + } + + function parseFunct7(instr) -> out { + out := shr64(toU64(25), instr) + } + + // + // Memory functions + // + function proofOffset(proofIndex) -> offset { + // proof size: 64-5+1=60 (a 64-bit mem-address branch to 32 byte leaf, incl leaf itself), all 32 bytes + offset := mul64(mul64(toU64(proofIndex), toU64(60)), toU64(32)) + offset := add64(offset, proofContentOffset()) + } + + function hashPair(a, b) -> h { + mstore(0, a) + mstore(0x20, b) + h := keccak256(0, 0x40) + } + + function getMemoryB32(addr, proofIndex) -> out { + if and64(addr, toU64(31)) { + // quick addr alignment check + revertWithCode(0xbad10ad0) // addr not aligned with 32 bytes + } + let offset := proofOffset(proofIndex) + let leaf := calldataload(offset) + offset := add64(offset, toU64(32)) + + let path := shr64(toU64(5), addr) // 32 bytes of memory per leaf + let node := leaf // starting from the leaf node, work back up by combining with siblings, to reconstruct + // the root + for { let i := 0 } lt(i, sub(64, 5)) { i := add(i, 1) } { + let sibling := calldataload(offset) + offset := add64(offset, toU64(32)) + switch and64(shr64(toU64(i), path), toU64(1)) + case 0 { node := hashPair(node, sibling) } + case 1 { node := hashPair(sibling, node) } + } + let memRoot := getMemRoot() + if iszero(eq(b32asBEWord(node), b32asBEWord(memRoot))) { + // verify the root matches + revertWithCode(0xbadf00d1) // bad memory proof + } + out := leaf + } + + // warning: setMemoryB32 does not verify the proof, + // it assumes the same memory 
proof has been verified with getMemoryB32 + function setMemoryB32(addr, v, proofIndex) { + if and64(addr, toU64(31)) { revertWithCode(0xbad10ad0) } // addr not aligned with 32 bytes + + let offset := proofOffset(proofIndex) + let leaf := v + offset := add64(offset, toU64(32)) + let path := shr64(toU64(5), addr) // 32 bytes of memory per leaf + let node := leaf // starting from the leaf node, work back up by combining with siblings, to reconstruct + // the root + for { let i := 0 } lt(i, sub(64, 5)) { i := add(i, 1) } { + let sibling := calldataload(offset) + offset := add64(offset, toU64(32)) + + switch and64(shr64(toU64(i), path), toU64(1)) + case 0 { node := hashPair(node, sibling) } + case 1 { node := hashPair(sibling, node) } + } + setMemRoot(node) // store new memRoot + } + + // load unaligned, optionally signed, little-endian, integer of 1 ... 8 bytes from memory + function loadMem(addr, size, signed, proofIndexL, proofIndexR) -> out { + if gt(size, 8) { revertWithCode(0xbad512e0) } // cannot load more than 8 bytes + // load/verify left part + let leftAddr := and64(addr, not64(toU64(31))) + let left := b32asBEWord(getMemoryB32(leftAddr, proofIndexL)) + let alignment := sub64(addr, leftAddr) + + let right := 0 + let rightAddr := and64(add64(addr, sub64(size, toU64(1))), not64(toU64(31))) + let leftShamt := sub64(sub64(toU64(32), alignment), size) + let rightShamt := toU64(0) + if iszero64(eq64(leftAddr, rightAddr)) { + // if unaligned, use second proof for the right part + if eq(proofIndexR, 0xff) { revertWithCode(0xbad22220) } // unexpected need for right-side proof in + // loadMem + // load/verify right part + right := b32asBEWord(getMemoryB32(rightAddr, proofIndexR)) + // left content is aligned to right of 32 bytes + leftShamt := toU64(0) + rightShamt := sub64(sub64(toU64(64), alignment), size) + } + + let addr_ := addr + let size_ := size + // left: prepare for byte-taking by right-aligning + left := shr(u64ToU256(shl64(toU64(3), leftShamt)), left) + // 
right: right-align for byte-taking by right-aligning + right := shr(u64ToU256(shl64(toU64(3), rightShamt)), right) + // loop: + for { let i := 0 } lt(i, size_) { i := add(i, 1) } { + // translate to reverse byte lookup, since we are reading little-endian memory, and need the highest + // byte first. + // effAddr := (addr + size - 1 - i) &^ 31 + let effAddr := and64(sub64(sub64(add64(addr_, size_), toU64(1)), toU64(i)), not64(toU64(31))) + // take a byte from either left or right, depending on the effective address + let b := toU256(0) + switch eq64(effAddr, leftAddr) + case 1 { + b := and(left, toU256(0xff)) + left := shr(toU256(8), left) + } + case 0 { + b := and(right, toU256(0xff)) + right := shr(toU256(8), right) + } + // append it to the output + out := or64(shl64(toU64(8), out), u256ToU64(b)) + } + + if signed { + let signBitShift := sub64(shl64(toU64(3), size_), toU64(1)) + out := signExtend64(out, signBitShift) + } + } + + // Splits the value into a left and a right part, each with a mask (identify data) and a patch (diff + // content). 
+ function leftAndRight(alignment, size, value) -> leftMask, rightMask, leftPatch, rightPatch { + let start := alignment + let end := add64(alignment, size) + for { let i := 0 } lt(i, 64) { i := add(i, 1) } { + let index := toU64(i) + let leftSide := lt64(index, toU64(32)) + switch leftSide + case 1 { + leftPatch := shl(8, leftPatch) + leftMask := shl(8, leftMask) + } + case 0 { + rightPatch := shl(8, rightPatch) + rightMask := shl(8, rightMask) + } + if and64(eq64(lt64(index, start), toU64(0)), lt64(index, end)) { + // if alignment <= i < alignment+size + let b := and(shr(u64ToU256(shl64(toU64(3), sub64(index, alignment))), value), toU256(0xff)) + switch leftSide + case 1 { + leftPatch := or(leftPatch, b) + leftMask := or(leftMask, toU256(0xff)) + } + case 0 { + rightPatch := or(rightPatch, b) + rightMask := or(rightMask, toU256(0xff)) + } + } + } + } + + function storeMemUnaligned(addr, size, value, proofIndexL, proofIndexR) { + if gt(size, 32) { revertWithCode(0xbad512e1) } // cannot store more than 32 bytes + + let leftAddr := and64(addr, not64(toU64(31))) + let rightAddr := and64(add64(addr, sub64(size, toU64(1))), not64(toU64(31))) + let alignment := sub64(addr, leftAddr) + let leftMask, rightMask, leftPatch, rightPatch := leftAndRight(alignment, size, value) + + // load the left base + let left := b32asBEWord(getMemoryB32(leftAddr, proofIndexL)) + // apply the left patch + left := or(and(left, not(leftMask)), leftPatch) + // write the left + setMemoryB32(leftAddr, beWordAsB32(left), proofIndexL) + + // if aligned: nothing more to do here + if eq64(leftAddr, rightAddr) { leave } + if eq(proofIndexR, 0xff) { revertWithCode(0xbad22221) } // unexpected need for right-side proof in + // storeMem + // load the right base (with updated mem root) + let right := b32asBEWord(getMemoryB32(rightAddr, proofIndexR)) + // apply the right patch + right := or(and(right, not(rightMask)), rightPatch) + // write the right (with updated mem root) + setMemoryB32(rightAddr, 
beWordAsB32(right), proofIndexR) + } + + function storeMem(addr, size, value, proofIndexL, proofIndexR) { + storeMemUnaligned(addr, size, u64ToU256(value), proofIndexL, proofIndexR) + } + + // + // Preimage oracle interactions + // + function writePreimageKey(addr, count) -> out { + // adjust count down, so we only have to read a single 32 byte leaf of memory + let alignment := and64(addr, toU64(31)) + let maxData := sub64(toU64(32), alignment) + if gt64(count, maxData) { count := maxData } + + let dat := b32asBEWord(getMemoryB32(sub64(addr, alignment), 1)) + // shift out leading bits + dat := shl(u64ToU256(shl64(toU64(3), alignment)), dat) + // shift to right end, remove trailing bits + dat := shr(u64ToU256(shl64(toU64(3), sub64(toU64(32), count))), dat) + + let bits := shl(toU256(3), u64ToU256(count)) + + let preImageKey := getPreimageKey() + + // Append to key content by bit-shifting + let key := b32asBEWord(preImageKey) + key := shl(bits, key) + key := or(key, dat) + + // We reset the pre-image value offset back to 0 (the right part of the merkle pair) + setPreimageKey(beWordAsB32(key)) + setPreimageOffset(toU64(0)) + out := count + } + + function readPreimagePart(key, offset) -> dat, datlen { + let addr := sload(preimageOraclePos()) // calling Oracle.readPreimage(bytes32,uint256) + let memPtr := mload(0x40) // get pointer to free memory for preimage interactions + mstore(memPtr, shl(224, 0xe03110e1)) // (32-4)*8=224: right-pad the function selector, and then store it + // as prefix + mstore(add(memPtr, 0x04), key) + mstore(add(memPtr, 0x24), offset) + let res := call(gas(), addr, 0, memPtr, 0x44, 0x00, 0x40) // output into scratch space + if res { + // 1 on success + dat := mload(0x00) + datlen := mload(0x20) + leave + } + revertWithCode(0xbadf00d0) + } + + // Original implementation is at src/cannon/PreimageKeyLib.sol + // but it cannot be used because this is inside assembly block + function localize(preImageKey, localContext_) -> localizedKey { + // Grab 
the current free memory pointer to restore later. + let ptr := mload(0x40) + // Store the local data key and caller next to each other in memory for hashing. + mstore(0, preImageKey) + mstore(0x20, caller()) + mstore(0x40, localContext_) + // Localize the key with the above `localize` operation. + localizedKey := or(and(keccak256(0, 0x60), not(shl(248, 0xFF))), shl(248, 1)) + // Restore the free memory pointer. + mstore(0x40, ptr) + } + + function readPreimageValue(addr, count, localContext_) -> out { + let preImageKey := getPreimageKey() + let offset := getPreimageOffset() + // If the preimage key is a local key, localize it in the context of the caller. + let preImageKeyPrefix := shr(248, preImageKey) // 256-8=248 + if eq(preImageKeyPrefix, 1) { preImageKey := localize(preImageKey, localContext_) } + // make call to pre-image oracle contract + let pdatB32, pdatlen := readPreimagePart(preImageKey, offset) + if iszero64(pdatlen) { + // EOF + out := toU64(0) + leave + } + let alignment := and64(addr, toU64(31)) // how many bytes addr is offset from being left-aligned + let maxData := sub64(toU64(32), alignment) // higher alignment leaves less room for data this step + if gt64(count, maxData) { count := maxData } + if gt64(count, pdatlen) { + // cannot read more than pdatlen + count := pdatlen + } + + let addr_ := addr + let count_ := count + let bits := shl64(toU64(3), sub64(toU64(32), count_)) // 32-count, in bits + let mask := not(sub(shl(u64ToU256(bits), toU256(1)), toU256(1))) // left-aligned mask for count bytes + let alignmentBits := u64ToU256(shl64(toU64(3), alignment)) + mask := shr(alignmentBits, mask) // mask of count bytes, shifted by alignment + let pdat := shr(alignmentBits, b32asBEWord(pdatB32)) // pdat, shifted by alignment + + // update pre-image reader with updated offset + let newOffset := add64(offset, count_) + setPreimageOffset(newOffset) + + out := count_ + + let node := getMemoryB32(sub64(addr_, alignment), 1) + let dat := 
and(b32asBEWord(node), not(mask)) // keep old bytes outside of mask + dat := or(dat, and(pdat, mask)) // fill with bytes from pdat + setMemoryB32(sub64(addr_, alignment), beWordAsB32(dat), 1) + } + + // + // Syscall handling + // + function sysCall(localContext_) { + let a7 := getRegister(toU64(17)) + switch a7 + case 93 { + // exit the calling thread. No multi-thread support yet, so just exit. + let a0 := getRegister(toU64(10)) + setExitCode(and(a0, 0xff)) + setExited() + // program stops here, no need to change registers. + } + case 94 { + // exit-group + let a0 := getRegister(toU64(10)) + setExitCode(and(a0, 0xff)) + setExited() + } + case 214 { + // brk + // Go sys_linux_riscv64 runtime will only ever call brk(NULL), i.e. first argument (register a0) set + // to 0. + + // brk(0) changes nothing about the memory, and returns the current page break + let v := shl64(toU64(30), toU64(1)) // set program break at 1 GiB + setRegister(toU64(10), v) + setRegister(toU64(11), toU64(0)) // no error + } + case 222 { + // mmap + // A0 = addr (hint) + let addr := getRegister(toU64(10)) + // A1 = n (length) + let length := getRegister(toU64(11)) + // A2 = prot (memory protection type, can ignore) + // A3 = flags (shared with other process and or written back to file) + let flags := getRegister(toU64(13)) + // A4 = fd (file descriptor, can ignore because we support anon memory only) + let fd := getRegister(toU64(14)) + // A5 = offset (offset in file, we don't support any non-anon memory, so we can ignore this) + + let errCode := 0 + // ensure MAP_ANONYMOUS is set and fd == -1 + switch or(iszero(and(flags, 0x20)), not(eq(fd, u64Mask()))) + case 1 { + addr := u64Mask() + errCode := toU64(0x4d) + } + default { + switch addr + case 0 { + // No hint, allocate it ourselves, by as much as the requested length. + // Increase the length to align it with desired page size if necessary. 
+ let align := and64(length, shortToU64(4095)) + if align { length := add64(length, sub64(shortToU64(4096), align)) } + let prevHeap := getHeap() + addr := prevHeap + setHeap(add64(prevHeap, length)) // increment heap with length + } + default { + // allow hinted memory address (leave it in A0 as return argument) + } + } + + setRegister(toU64(10), addr) + setRegister(toU64(11), errCode) + } + case 63 { + // read + let fd := getRegister(toU64(10)) // A0 = fd + let addr := getRegister(toU64(11)) // A1 = *buf addr + let count := getRegister(toU64(12)) // A2 = count + let n := 0 + let errCode := 0 + switch fd + case 0 { + // stdin + n := toU64(0) // never read anything from stdin + errCode := toU64(0) + } + case 3 { + // hint-read + // say we read it all, to continue execution after reading the hint-write ack response + n := count + errCode := toU64(0) + } + case 5 { + // preimage read + n := readPreimageValue(addr, count, localContext_) + errCode := toU64(0) + } + default { + n := u64Mask() // -1 (reading error) + errCode := toU64(0x4d) // EBADF + } + setRegister(toU64(10), n) + setRegister(toU64(11), errCode) + } + case 64 { + // write + let fd := getRegister(toU64(10)) // A0 = fd + let addr := getRegister(toU64(11)) // A1 = *buf addr + let count := getRegister(toU64(12)) // A2 = count + let n := 0 + let errCode := 0 + switch fd + case 1 { + // stdout + n := count // write completes fully in single instruction step + errCode := toU64(0) + } + case 2 { + // stderr + n := count // write completes fully in single instruction step + errCode := toU64(0) + } + case 4 { + // hint-write + n := count + errCode := toU64(0) + } + case 6 { + // pre-image key-write + n := writePreimageKey(addr, count) + errCode := toU64(0) // no error + } + default { + // any other file, including (3) hint read (5) preimage read + n := u64Mask() // -1 (writing error) + errCode := toU64(0x4d) // EBADF + } + setRegister(toU64(10), n) + setRegister(toU64(11), errCode) + } + case 25 { + // fcntl - 
file descriptor manipulation / info lookup + let fd := getRegister(toU64(10)) // A0 = fd + let cmd := getRegister(toU64(11)) // A1 = cmd + let out := 0 + let errCode := 0 + switch cmd + case 0x1 { + // F_GETFD: get file descriptor flags + switch fd + case 0 { + // stdin + out := toU64(0) // no flag set + } + case 1 { + // stdout + out := toU64(0) // no flag set + } + case 2 { + // stderr + out := toU64(0) // no flag set + } + case 3 { + // hint-read + out := toU64(0) // no flag set + } + case 4 { + // hint-write + out := toU64(0) // no flag set + } + case 5 { + // pre-image read + out := toU64(0) // no flag set + } + case 6 { + // pre-image write + out := toU64(0) // no flag set + } + default { + out := u64Mask() + errCode := toU64(0x4d) //EBADF + } + } + case 0x3 { + // F_GETFL: get file descriptor flags + switch fd + case 0 { + // stdin + out := toU64(0) // O_RDONLY + } + case 1 { + // stdout + out := toU64(1) // O_WRONLY + } + case 2 { + // stderr + out := toU64(1) // O_WRONLY + } + case 3 { + // hint-read + out := toU64(0) // O_RDONLY + } + case 4 { + // hint-write + out := toU64(1) // O_WRONLY + } + case 5 { + // pre-image read + out := toU64(0) // O_RDONLY + } + case 6 { + // pre-image write + out := toU64(1) // O_WRONLY + } + default { + out := u64Mask() + errCode := toU64(0x4d) // EBADF + } + } + default { + // no other commands: don't allow changing flags, duplicating FDs, etc. 
+ out := u64Mask() + errCode := toU64(0x16) // EINVAL (cmd not recognized by this kernel) + } + setRegister(toU64(10), out) + setRegister(toU64(11), errCode) // EBADF + } + case 56 { + // openat - the Go linux runtime will try to open optional /sys/kernel files for performance hints + setRegister(toU64(10), u64Mask()) + setRegister(toU64(11), toU64(0xd)) // EACCES - no access allowed + } + case 113 { + // clock_gettime + let addr := getRegister(toU64(11)) // addr of timespec struct + // write 1337s + 42ns as time + let value := or(shortToU256(1337), shl(shortToU256(64), toU256(42))) + storeMemUnaligned(addr, toU64(16), value, 1, 2) + setRegister(toU64(10), toU64(0)) + setRegister(toU64(11), toU64(0)) + } + case 220 { + // clone - not supported + setRegister(toU64(10), toU64(1)) + setRegister(toU64(11), toU64(0)) + } + case 163 { + // getrlimit + let res := getRegister(toU64(10)) + let addr := getRegister(toU64(11)) + switch res + case 0x7 { + // RLIMIT_NOFILE + // first 8 bytes: soft limit. 1024 file handles max open + // second 8 bytes: hard limit + storeMemUnaligned( + addr, toU64(16), or(shortToU256(1024), shl(toU256(64), shortToU256(1024))), 1, 2 + ) + setRegister(toU64(10), toU64(0)) + setRegister(toU64(11), toU64(0)) + } + default { revertWithCode(0xf0012) } // unrecognized resource limit lookup + } + case 261 { + // prlimit64 -- unsupported, we have getrlimit, is prlimit64 even called? 
+ revertWithCode(0xf001ca11) // unsupported system call + } + case 422 { + // futex - not supported, for now + revertWithCode(0xf001ca11) // unsupported system call + } + case 101 { + // nanosleep - not supported, for now + revertWithCode(0xf001ca11) // unsupported system call + } + default { + // Ignore(no-op) unsupported system calls + setRegister(toU64(10), toU64(0)) + setRegister(toU64(11), toU64(0)) + } + } + + // + // Instruction execution + // + if getExited() { + // early exit if we can + mstore(0, computeStateHash()) + return(0, 0x20) + } + setStep(add64(getStep(), toU64(1))) + + let _pc := getPC() + let instr := loadMem(_pc, toU64(4), false, 0, 0xff) // raw instruction + + // these fields are ignored if not applicable to the instruction type / opcode + let opcode := parseOpcode(instr) + let rd := parseRd(instr) // destination register index + let funct3 := parseFunct3(instr) + let rs1 := parseRs1(instr) // source register 1 index + let rs2 := parseRs2(instr) // source register 2 index + let funct7 := parseFunct7(instr) + + switch opcode + case 0x03 { + let pc_ := _pc + // 000_0011: memory loading + // LB, LH, LW, LD, LBU, LHU, LWU + let imm := parseImmTypeI(instr) + let signed := iszero64(and64(funct3, toU64(4))) // 4 = 100 -> bitflag + let size := shl64(and64(funct3, toU64(3)), toU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size + let rs1Value := getRegister(rs1) + let memIndex := add64(rs1Value, signExtend64(imm, toU64(11))) + let rdValue := loadMem(memIndex, size, signed, 1, 2) + setRegister(rd, rdValue) + setPC(add64(pc_, toU64(4))) + } + case 0x23 { + let pc_ := _pc + // 010_0011: memory storing + // SB, SH, SW, SD + let imm := parseImmTypeS(instr) + let size := shl64(funct3, toU64(1)) + let value := getRegister(rs2) + let rs1Value := getRegister(rs1) + let memIndex := add64(rs1Value, signExtend64(imm, toU64(11))) + storeMem(memIndex, size, value, 1, 2) + setPC(add64(pc_, toU64(4))) + } + case 0x63 { + // 110_0011: branching + let rs1Value := 
getRegister(rs1) + let rs2Value := getRegister(rs2) + let branchHit := toU64(0) + switch funct3 + case 0 { + // 000 = BEQ + branchHit := eq64(rs1Value, rs2Value) + } + case 1 { + // 001 = BNE + branchHit := and64(not64(eq64(rs1Value, rs2Value)), toU64(1)) + } + case 4 { + // 100 = BLT + branchHit := slt64(rs1Value, rs2Value) + } + case 5 { + // 101 = BGE + branchHit := and64(not64(slt64(rs1Value, rs2Value)), toU64(1)) + } + case 6 { + // 110 = BLTU + branchHit := lt64(rs1Value, rs2Value) + } + case 7 { + // 111 := BGEU + branchHit := and64(not64(lt64(rs1Value, rs2Value)), toU64(1)) + } + switch branchHit + case 0 { _pc := add64(_pc, toU64(4)) } + default { + let imm := parseImmTypeB(instr) + // imm12 is a signed offset, in multiples of 2 bytes. + // So it's really 13 bits with a hardcoded 0 bit. + _pc := add64(_pc, imm) + } + // not like the other opcodes: nothing to write to rd register, and PC has already changed + setPC(_pc) + } + case 0x13 { + // 001_0011: immediate arithmetic and logic + let rs1Value := getRegister(rs1) + let imm := parseImmTypeI(instr) + let rdValue := 0 + switch funct3 + case 0 { + // 000 = ADDI + rdValue := add64(rs1Value, imm) + } + case 1 { + // 001 = SLLI + rdValue := shl64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode + } + case 2 { + // 010 = SLTI + rdValue := slt64(rs1Value, imm) + } + case 3 { + // 011 = SLTIU + rdValue := lt64(rs1Value, imm) + } + case 4 { + // 100 = XORI + rdValue := xor64(rs1Value, imm) + } + case 5 { + // 101 = SR~ + switch shr64(toU64(6), imm) + // in rv64i the top 6 bits select the shift type + case 0x00 { + // 000000 = SRLI + rdValue := shr64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode + } + case 0x10 { + // 010000 = SRAI + rdValue := sar64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode + } + } + case 6 { + // 110 = ORI + rdValue := or64(rs1Value, imm) + } + case 7 { + // 111 = ANDI + rdValue := and64(rs1Value, imm) + } + setRegister(rd, rdValue) + 
setPC(add64(_pc, toU64(4))) + } + case 0x1B { + // 001_1011: immediate arithmetic and logic signed 32 bit + let rs1Value := getRegister(rs1) + let imm := parseImmTypeI(instr) + let rdValue := 0 + switch funct3 + case 0 { + // 000 = ADDIW + rdValue := mask32Signed64(add64(rs1Value, imm)) + } + case 1 { + // 001 = SLLIW + rdValue := mask32Signed64(shl64(and64(imm, toU64(0x1F)), rs1Value)) + } + case 5 { + // 101 = SR~ + let shamt := and64(imm, toU64(0x1F)) + switch shr64(toU64(5), imm) + // top 7 bits select the shift type + case 0x00 { + // 0000000 = SRLIW + rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), toU64(31)) + } + case 0x20 { + // 0100000 = SRAIW + rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(toU64(31), shamt)) + } + } + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x33 { + // 011_0011: register arithmetic and logic + let rs1Value := getRegister(rs1) + let rs2Value := getRegister(rs2) + let rdValue := 0 + switch funct7 + case 1 { + // RV M extension + switch funct3 + case 0 { + // 000 = MUL: signed x signed + rdValue := mul64(rs1Value, rs2Value) + } + case 1 { + // 001 = MULH: upper bits of signed x signed + rdValue := + u256ToU64(shr(toU256(64), mul(signExtend64To256(rs1Value), signExtend64To256(rs2Value)))) + } + case 2 { + // 010 = MULHSU: upper bits of signed x unsigned + rdValue := u256ToU64(shr(toU256(64), mul(signExtend64To256(rs1Value), u64ToU256(rs2Value)))) + } + case 3 { + // 011 = MULHU: upper bits of unsigned x unsigned + rdValue := u256ToU64(shr(toU256(64), mul(u64ToU256(rs1Value), u64ToU256(rs2Value)))) + } + case 4 { + // 100 = DIV + switch rs2Value + case 0 { rdValue := u64Mask() } + default { rdValue := sdiv64(rs1Value, rs2Value) } + } + case 5 { + // 101 = DIVU + switch rs2Value + case 0 { rdValue := u64Mask() } + default { rdValue := div64(rs1Value, rs2Value) } + } + case 6 { + // 110 = REM + switch rs2Value + case 0 { rdValue := rs1Value } + default { rdValue := 
smod64(rs1Value, rs2Value) } + } + case 7 { + // 111 = REMU + switch rs2Value + case 0 { rdValue := rs1Value } + default { rdValue := mod64(rs1Value, rs2Value) } + } + } + default { + switch funct3 + case 0 { + // 000 = ADD/SUB + switch funct7 + case 0x00 { + // 0000000 = ADD + rdValue := add64(rs1Value, rs2Value) + } + case 0x20 { + // 0100000 = SUB + rdValue := sub64(rs1Value, rs2Value) + } + } + case 1 { + // 001 = SLL + rdValue := shl64(and64(rs2Value, toU64(0x3F)), rs1Value) // only the low 6 bits are consider in + // RV6VI + } + case 2 { + // 010 = SLT + rdValue := slt64(rs1Value, rs2Value) + } + case 3 { + // 011 = SLTU + rdValue := lt64(rs1Value, rs2Value) + } + case 4 { + // 100 = XOR + rdValue := xor64(rs1Value, rs2Value) + } + case 5 { + // 101 = SR~ + switch funct7 + case 0x00 { + // 0000000 = SRL + rdValue := shr64(and64(rs2Value, toU64(0x3F)), rs1Value) // logical: fill with zeroes + } + case 0x20 { + // 0100000 = SRA + rdValue := sar64(and64(rs2Value, toU64(0x3F)), rs1Value) // arithmetic: sign bit is extended + } + } + case 6 { + // 110 = OR + rdValue := or64(rs1Value, rs2Value) + } + case 7 { + // 111 = AND + rdValue := and64(rs1Value, rs2Value) + } + } + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x3B { + // 011_1011: register arithmetic and logic in 32 bits + let rs1Value := getRegister(rs1) + let rs2Value := getRegister(rs2) + let rdValue := 0 + switch funct7 + case 1 { + // RV M extension + switch funct3 + case 0 { + // 000 = MULW + rdValue := mask32Signed64(mul64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) + } + case 4 { + // 100 = DIVW + switch rs2Value + case 0 { rdValue := u64Mask() } + default { + rdValue := mask32Signed64(sdiv64(mask32Signed64(rs1Value), mask32Signed64(rs2Value))) + } + } + case 5 { + // 101 = DIVUW + switch rs2Value + case 0 { rdValue := u64Mask() } + default { + rdValue := mask32Signed64(div64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) + } + } + case 6 { + // 110 = REMW 
+ switch rs2Value + case 0 { rdValue := mask32Signed64(rs1Value) } + default { + rdValue := mask32Signed64(smod64(mask32Signed64(rs1Value), mask32Signed64(rs2Value))) + } + } + case 7 { + // 111 = REMUW + switch rs2Value + case 0 { rdValue := mask32Signed64(rs1Value) } + default { + rdValue := mask32Signed64(mod64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) + } + } + } + default { + switch funct3 + case 0 { + // 000 = ADDW/SUBW + switch funct7 + case 0x00 { + // 0000000 = ADDW + rdValue := mask32Signed64(add64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) + } + case 0x20 { + // 0100000 = SUBW + rdValue := mask32Signed64(sub64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask()))) + } + } + case 1 { + // 001 = SLLW + rdValue := mask32Signed64(shl64(and64(rs2Value, toU64(0x1F)), rs1Value)) + } + case 5 { + // 101 = SR~ + let shamt := and64(rs2Value, toU64(0x1F)) + switch funct7 + case 0x00 { + // 0000000 = SRLW + rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), toU64(31)) + } + case 0x20 { + // 0100000 = SRAW + rdValue := signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(toU64(31), shamt)) + } + } + } + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x37 { + // 011_0111: LUI = Load upper immediate + let imm := parseImmTypeU(instr) + let rdValue := shl64(toU64(12), imm) + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x17 { + // 001_0111: AUIPC = Add upper immediate to PC + let imm := parseImmTypeU(instr) + let rdValue := add64(_pc, signExtend64(shl64(toU64(12), imm), toU64(31))) + setRegister(rd, rdValue) + setPC(add64(_pc, toU64(4))) + } + case 0x6F { + // 110_1111: JAL = Jump and link + let imm := parseImmTypeJ(instr) + let rdValue := add64(_pc, toU64(4)) + setRegister(rd, rdValue) + setPC(add64(_pc, signExtend64(shl64(toU64(1), imm), toU64(20)))) // signed offset in multiples of 2 + // bytes (last bit is there, but ignored) + } + case 0x67 { + // 110_0111: JALR = Jump and 
link register + let rs1Value := getRegister(rs1) + let imm := parseImmTypeI(instr) + let rdValue := add64(_pc, toU64(4)) + setRegister(rd, rdValue) + setPC(and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))) // least + // significant bit is set to 0 + } + case 0x73 { + // 111_0011: environment things + switch funct3 + case 0 { + // 000 = ECALL/EBREAK + switch shr64(toU64(20), instr) + // I-type, top 12 bits + case 0 { + // imm12 = 000000000000 ECALL + sysCall(_localContext) + setPC(add64(_pc, toU64(4))) + } + default { + // imm12 = 000000000001 EBREAK + setPC(add64(_pc, toU64(4))) // ignore breakpoint + } + } + default { + // CSR instructions + setRegister(rd, toU64(0)) // ignore CSR instructions + setPC(add64(_pc, toU64(4))) + } + } + case 0x2F { + // 010_1111: RV{32,64}A and RV{32,64}A atomic operations extension + // acquire and release bits: + // aq := and64(shr64(toU64(1), funct7), toU64(1)) + // rl := and64(funct7, toU64(1)) + // if none set: unordered + // if aq is set: no following mem ops observed before acquire mem op + // if rl is set: release mem op not observed before earlier mem ops + // if both set: sequentially consistent + // These are no-op here because there is no pipeline of mem ops to acquire/release. 
+ + // 0b010 == RV32A W variants + // 0b011 == RV64A D variants + let size := shl64(funct3, toU64(1)) + if or(lt64(size, toU64(4)), gt64(size, toU64(8))) { revertWithCode(0xbada70) } // bad AMO size + + let addr := getRegister(rs1) + if and64(addr, toU64(3)) { + // quick addr alignment check + revertWithCode(0xbad10ad0) // addr not aligned with 4 bytes + } + + let op := shr64(toU64(2), funct7) + switch op + case 0x2 { + // 00010 = LR = Load Reserved + let v := loadMem(addr, size, true, 1, 2) + setRegister(rd, v) + setLoadReservation(addr) + } + case 0x3 { + // 00011 = SC = Store Conditional + let rdValue := toU64(1) + if eq64(addr, getLoadReservation()) { + let rs2Value := getRegister(rs2) + storeMem(addr, size, rs2Value, 1, 2) + rdValue := toU64(0) + } + setRegister(rd, rdValue) + setLoadReservation(toU64(0)) + } + default { + // AMO: Atomic Memory Operation + let rs2Value := getRegister(rs2) + if eq64(size, toU64(4)) { rs2Value := mask32Signed64(rs2Value) } + let value := rs2Value + let v := loadMem(addr, size, true, 1, 2) + let rdValue := v + switch op + case 0x0 { + // 00000 = AMOADD = add + v := add64(v, value) + } + case 0x1 { + // 00001 = AMOSWAP + v := value + } + case 0x4 { + // 00100 = AMOXOR = xor + v := xor64(v, value) + } + case 0x8 { + // 01000 = AMOOR = or + v := or64(v, value) + } + case 0xc { + // 01100 = AMOAND = and + v := and64(v, value) + } + case 0x10 { + // 10000 = AMOMIN = min signed + if slt64(value, v) { v := value } + } + case 0x14 { + // 10100 = AMOMAX = max signed + if sgt64(value, v) { v := value } + } + case 0x18 { + // 11000 = AMOMINU = min unsigned + if lt64(value, v) { v := value } + } + case 0x1c { + // 11100 = AMOMAXU = max unsigned + if gt64(value, v) { v := value } + } + default { revertWithCode(0xf001a70) } // unknown atomic operation + + storeMem(addr, size, v, 1, 3) // after overwriting 1, proof 2 is no longer valid + setRegister(rd, rdValue) + } + setPC(add64(_pc, toU64(4))) + } + case 0x0F { + // 000_1111: fence + // Used 
to impose additional ordering constraints; flushing the mem operation pipeline. + // This VM doesn't have a pipeline, nor additional harts, so this is a no-op. + // FENCE / FENCE.TSO / FENCE.I all no-op: there's nothing to synchronize. + setPC(add64(_pc, toU64(4))) + } + case 0x07 { + // FLW/FLD: floating point load word/double + setPC(add64(_pc, toU64(4))) // no-op this. + } + case 0x27 { + // FSW/FSD: floating point store word/double + setPC(add64(_pc, toU64(4))) // no-op this. + } + case 0x53 { + // FADD etc. no-op is enough to pass Go runtime check + setPC(add64(_pc, toU64(4))) // no-op this. + } + default { revertWithCode(0xf001c0de) } // unknown instruction opcode + + mstore(0, computeStateHash()) + return(0, 0x20) + } + } +} diff --git a/packages/contracts-bedrock/src/periphery/op-nft/AttestationStation.sol b/packages/contracts-bedrock/src/periphery/op-nft/AttestationStation.sol deleted file mode 100644 index 4d15862d435..00000000000 --- a/packages/contracts-bedrock/src/periphery/op-nft/AttestationStation.sol +++ /dev/null @@ -1,59 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { ISemver } from "src/universal/interfaces/ISemver.sol"; - -/// @title AttestationStation -/// @author Optimism Collective -/// @author Gitcoin -/// @notice Where attestations live. -contract AttestationStation is ISemver { - /// @notice Struct representing data that is being attested. - /// @custom:field about Address for which the attestation is about. - /// @custom:field key A bytes32 key for the attestation. - /// @custom:field val The attestation as arbitrary bytes. - struct AttestationData { - address about; - bytes32 key; - bytes val; - } - - /// @notice Maps addresses to attestations. Creator => About => Key => Value. - mapping(address => mapping(address => mapping(bytes32 => bytes))) public attestations; - - /// @notice Emitted when Attestation is created. - /// @param creator Address that made the attestation. 
- /// @param about Address attestation is about. - /// @param key Key of the attestation. - /// @param val Value of the attestation. - event AttestationCreated(address indexed creator, address indexed about, bytes32 indexed key, bytes val); - - /// @notice Semantic version. - /// @custom:semver 1.2.1-beta.1 - string public constant version = "1.2.1-beta.1"; - - /// @notice Allows anyone to create an attestation. - /// @param _about Address that the attestation is about. - /// @param _key A key used to namespace the attestation. - /// @param _val An arbitrary value stored as part of the attestation. - function attest(address _about, bytes32 _key, bytes memory _val) public { - attestations[msg.sender][_about][_key] = _val; - - emit AttestationCreated(msg.sender, _about, _key, _val); - } - - /// @notice Allows anyone to create attestations. - /// @param _attestations An array of AttestationData structs. - function attest(AttestationData[] calldata _attestations) external { - uint256 length = _attestations.length; - for (uint256 i = 0; i < length;) { - AttestationData memory attestation = _attestations[i]; - - attest(attestation.about, attestation.key, attestation.val); - - unchecked { - ++i; - } - } - } -} diff --git a/packages/contracts-bedrock/src/periphery/op-nft/Optimist.sol b/packages/contracts-bedrock/src/periphery/op-nft/Optimist.sol deleted file mode 100644 index b15c0f00044..00000000000 --- a/packages/contracts-bedrock/src/periphery/op-nft/Optimist.sol +++ /dev/null @@ -1,124 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { ERC721BurnableUpgradeable } from - "@openzeppelin/contracts-upgradeable/token/ERC721/extensions/ERC721BurnableUpgradeable.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { OptimistAllowlist } from "src/periphery/op-nft/OptimistAllowlist.sol"; -import { Strings } from 
"@openzeppelin/contracts/utils/Strings.sol"; - -/// @author Optimism Collective -/// @author Gitcoin -/// @title Optimist -/// @notice A Soul Bound Token for real humans only(tm). -contract Optimist is ERC721BurnableUpgradeable, ISemver { - /// @notice Attestation key used by the attestor to attest the baseURI. - bytes32 public constant BASE_URI_ATTESTATION_KEY = bytes32("optimist.base-uri"); - - /// @notice Attestor who attests to baseURI. - address public immutable BASE_URI_ATTESTOR; - - /// @notice Address of the AttestationStation contract. - AttestationStation public immutable ATTESTATION_STATION; - - /// @notice Address of the OptimistAllowlist contract. - OptimistAllowlist public immutable OPTIMIST_ALLOWLIST; - - /// @notice Semantic version. - /// @custom:semver 2.1.1-beta.1 - string public constant version = "2.1.1-beta.1"; - - /// @param _name Token name. - /// @param _symbol Token symbol. - /// @param _baseURIAttestor Address of the baseURI attestor. - /// @param _attestationStation Address of the AttestationStation contract. - /// @param _optimistAllowlist Address of the OptimistAllowlist contract - constructor( - string memory _name, - string memory _symbol, - address _baseURIAttestor, - AttestationStation _attestationStation, - OptimistAllowlist _optimistAllowlist - ) { - BASE_URI_ATTESTOR = _baseURIAttestor; - ATTESTATION_STATION = _attestationStation; - OPTIMIST_ALLOWLIST = _optimistAllowlist; - initialize(_name, _symbol); - } - - /// @notice Initializes the Optimist contract. - /// @param _name Token name. - /// @param _symbol Token symbol. - function initialize(string memory _name, string memory _symbol) public initializer { - __ERC721_init(_name, _symbol); - __ERC721Burnable_init(); - } - - /// @notice Allows an address to mint an Optimist NFT. Token ID is the uint256 representation - /// of the recipient's address. Recipients must be permitted to mint, eventually anyone - /// will be able to mint. One token per address. 
- /// @param _recipient Address of the token recipient. - function mint(address _recipient) public { - require(isOnAllowList(_recipient), "Optimist: address is not on allowList"); - _safeMint(_recipient, tokenIdOfAddress(_recipient)); - } - - /// @notice Returns the baseURI for all tokens. - /// @return uri_ BaseURI for all tokens. - function baseURI() public view returns (string memory uri_) { - uri_ = string( - abi.encodePacked( - ATTESTATION_STATION.attestations(BASE_URI_ATTESTOR, address(this), bytes32("optimist.base-uri")) - ) - ); - } - - /// @notice Returns the token URI for a given token by ID - /// @param _tokenId Token ID to query. - /// @return uri_ Token URI for the given token by ID. - function tokenURI(uint256 _tokenId) public view virtual override returns (string memory uri_) { - uri_ = string( - abi.encodePacked( - baseURI(), - "/", - // Properly format the token ID as a 20 byte hex string (address). - Strings.toHexString(_tokenId, 20), - ".json" - ) - ); - } - - /// @notice Checks OptimistAllowlist to determine whether a given address is allowed to mint - /// the Optimist NFT. Since the Optimist NFT will also be used as part of the - /// Citizens House, mints are currently restricted. Eventually anyone will be able - /// to mint. - /// @return allowed_ Whether or not the address is allowed to mint yet. - function isOnAllowList(address _recipient) public view returns (bool allowed_) { - allowed_ = OPTIMIST_ALLOWLIST.isAllowedToMint(_recipient); - } - - /// @notice Returns the token ID for the token owned by a given address. This is the uint256 - /// representation of the given address. - /// @return Token ID for the token owned by the given address. - function tokenIdOfAddress(address _owner) public pure returns (uint256) { - return uint256(uint160(_owner)); - } - - /// @notice Disabled for the Optimist NFT (Soul Bound Token). 
- function approve(address, uint256) public pure override { - revert("Optimist: soul bound token"); - } - - /// @notice Disabled for the Optimist NFT (Soul Bound Token). - function setApprovalForAll(address, bool) public virtual override { - revert("Optimist: soul bound token"); - } - - /// @notice Prevents transfers of the Optimist NFT (Soul Bound Token). - /// @param _from Address of the token sender. - /// @param _to Address of the token recipient. - function _beforeTokenTransfer(address _from, address _to, uint256) internal virtual override { - require(_from == address(0) || _to == address(0), "Optimist: soul bound token"); - } -} diff --git a/packages/contracts-bedrock/src/periphery/op-nft/OptimistAllowlist.sol b/packages/contracts-bedrock/src/periphery/op-nft/OptimistAllowlist.sol deleted file mode 100644 index ffa46116a4c..00000000000 --- a/packages/contracts-bedrock/src/periphery/op-nft/OptimistAllowlist.sol +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { OptimistConstants } from "src/periphery/op-nft/libraries/OptimistConstants.sol"; - -/// @title OptimistAllowlist -/// @notice Source of truth for whether an address is able to mint an Optimist NFT. -/// isAllowedToMint function checks various signals to return boolean value -/// for whether an address is eligible or not. -contract OptimistAllowlist is ISemver { - /// @notice Attestation key used by the AllowlistAttestor to manually add addresses to the - /// allowlist. - bytes32 public constant OPTIMIST_CAN_MINT_ATTESTATION_KEY = bytes32("optimist.can-mint"); - - /// @notice Attestation key used by Coinbase to issue attestations for Quest participants. 
- bytes32 public constant COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY = bytes32("coinbase.quest-eligible"); - - /// @notice Address of the AttestationStation contract. - AttestationStation public immutable ATTESTATION_STATION; - - /// @notice Attestor that issues 'optimist.can-mint' attestations. - address public immutable ALLOWLIST_ATTESTOR; - - /// @notice Attestor that issues 'coinbase.quest-eligible' attestations. - address public immutable COINBASE_QUEST_ATTESTOR; - - /// @notice Address of OptimistInviter contract that issues 'optimist.can-mint-from-invite' - /// attestations. - address public immutable OPTIMIST_INVITER; - - /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.1 - string public constant version = "1.1.1-beta.1"; - - /// @param _attestationStation Address of the AttestationStation contract. - /// @param _allowlistAttestor Address of the allowlist attestor. - /// @param _coinbaseQuestAttestor Address of the Coinbase Quest attestor. - /// @param _optimistInviter Address of the OptimistInviter contract. - constructor( - AttestationStation _attestationStation, - address _allowlistAttestor, - address _coinbaseQuestAttestor, - address _optimistInviter - ) { - ATTESTATION_STATION = _attestationStation; - ALLOWLIST_ATTESTOR = _allowlistAttestor; - COINBASE_QUEST_ATTESTOR = _coinbaseQuestAttestor; - OPTIMIST_INVITER = _optimistInviter; - } - - /// @notice Checks whether a given address is allowed to mint the Optimist NFT yet. Since the - /// Optimist NFT will also be used as part of the Citizens House, mints are currently - /// restricted. Eventually anyone will be able to mint. - /// Currently, address is allowed to mint if it satisfies any of the following: - /// 1) Has a valid 'optimist.can-mint' attestation from the allowlist attestor. - /// 2) Has a valid 'coinbase.quest-eligible' attestation from Coinbase Quest attestor - /// 3) Has a valid 'optimist.can-mint-from-invite' attestation from the OptimistInviter - /// contract. 
- /// @param _claimer Address to check. - /// @return allowed_ Whether or not the address is allowed to mint yet. - function isAllowedToMint(address _claimer) public view returns (bool allowed_) { - allowed_ = _hasAttestationFromAllowlistAttestor(_claimer) || _hasAttestationFromCoinbaseQuestAttestor(_claimer) - || _hasAttestationFromOptimistInviter(_claimer); - } - - /// @notice Checks whether an address has a valid 'optimist.can-mint' attestation from the - /// allowlist attestor. - /// @param _claimer Address to check. - /// @return valid_ Whether or not the address has a valid attestation. - function _hasAttestationFromAllowlistAttestor(address _claimer) internal view returns (bool valid_) { - // Expected attestation value is bytes32("true") - valid_ = _hasValidAttestation(ALLOWLIST_ATTESTOR, _claimer, OPTIMIST_CAN_MINT_ATTESTATION_KEY); - } - - /// @notice Checks whether an address has a valid attestation from the Coinbase attestor. - /// @param _claimer Address to check. - /// @return valid_ Whether or not the address has a valid attestation. - function _hasAttestationFromCoinbaseQuestAttestor(address _claimer) internal view returns (bool valid_) { - // Expected attestation value is bytes32("true") - valid_ = _hasValidAttestation(COINBASE_QUEST_ATTESTOR, _claimer, COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY); - } - - /// @notice Checks whether an address has a valid attestation from the OptimistInviter contract. - /// @param _claimer Address to check. - /// @return valid_ Whether or not the address has a valid attestation. - function _hasAttestationFromOptimistInviter(address _claimer) internal view returns (bool valid_) { - // Expected attestation value is the inviter's address - valid_ = _hasValidAttestation( - OPTIMIST_INVITER, _claimer, OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY - ); - } - - /// @notice Checks whether an address has a valid truthy attestation. - /// Any attestation val other than bytes32("") is considered truthy. 
- /// @param _creator Address that made the attestation. - /// @param _about Address attestation is about. - /// @param _key Key of the attestation. - /// @return valid_ Whether or not the address has a valid truthy attestation. - function _hasValidAttestation(address _creator, address _about, bytes32 _key) internal view returns (bool valid_) { - valid_ = ATTESTATION_STATION.attestations(_creator, _about, _key).length > 0; - } -} diff --git a/packages/contracts-bedrock/src/periphery/op-nft/OptimistInviter.sol b/packages/contracts-bedrock/src/periphery/op-nft/OptimistInviter.sol deleted file mode 100644 index ae0ab9d9265..00000000000 --- a/packages/contracts-bedrock/src/periphery/op-nft/OptimistInviter.sol +++ /dev/null @@ -1,235 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { OptimistConstants } from "src/periphery/op-nft/libraries/OptimistConstants.sol"; -import { ISemver } from "src/universal/interfaces/ISemver.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { SignatureChecker } from "@openzeppelin/contracts/utils/cryptography/SignatureChecker.sol"; -import { EIP712Upgradeable } from "@openzeppelin/contracts-upgradeable/utils/cryptography/draft-EIP712Upgradeable.sol"; - -/// @custom:upgradeable -/// @title OptimistInviter -/// @notice OptimistInviter issues "optimist.can-invite" and "optimist.can-mint-from-invite" -/// attestations. Accounts that have invites can issue signatures that allow other -/// accounts to claim an invite. The invitee uses a claim and reveal flow to claim the -/// invite to an address of their choosing. 
-/// -/// Parties involved: -/// 1) INVITE_GRANTER: trusted account that can allow accounts to issue invites -/// 2) issuer: account that is allowed to issue invites -/// 3) claimer: account that receives the invites -/// -/// Flow: -/// 1) INVITE_GRANTER calls _setInviteCount to allow an issuer to issue a certain number -/// of invites, and also creates a "optimist.can-invite" attestation for the issuer -/// 2) Off-chain, the issuer signs (EIP-712) a ClaimableInvite to produce a signature -/// 3) Off-chain, invite issuer sends the plaintext ClaimableInvite and the signature -/// to the recipient -/// 4) claimer chooses an address they want to receive the invite on -/// 5) claimer commits the hash of the address they want to receive the invite on and the -/// received signature keccak256(abi.encode(addressToReceiveTo, receivedSignature)) -/// using the commitInvite function -/// 6) claimer waits for the MIN_COMMITMENT_PERIOD to pass. -/// 7) claimer reveals the plaintext ClaimableInvite and the signature using the -/// claimInvite function, receiving the "optimist.can-mint-from-invite" attestation -contract OptimistInviter is ISemver, EIP712Upgradeable { - /// @notice Emitted when an invite is claimed. - /// @param issuer Address that issued the signature. - /// @param claimer Address that claimed the invite. - event InviteClaimed(address indexed issuer, address indexed claimer); - - /// @notice Version used for the EIP712 domain separator. This version is separated from the - /// contract semver because the EIP712 domain separator is used to sign messages, and - /// changing the domain separator invalidates all existing signatures. We should only - /// bump this version if we make a major change to the signature scheme. - string public constant EIP712_VERSION = "1.0.0"; - - /// @notice EIP712 typehash for the ClaimableInvite type. 
- bytes32 public constant CLAIMABLE_INVITE_TYPEHASH = keccak256("ClaimableInvite(address issuer,bytes32 nonce)"); - - /// @notice Attestation key for that signals that an account was allowed to issue invites - bytes32 public constant CAN_INVITE_ATTESTATION_KEY = bytes32("optimist.can-invite"); - - /// @notice Granter who can set accounts' invite counts. - address public immutable INVITE_GRANTER; - - /// @notice Address of the AttestationStation contract. - AttestationStation public immutable ATTESTATION_STATION; - - /// @notice Minimum age of a commitment (in seconds) before it can be revealed using - /// claimInvite. Currently set to 60 seconds. - /// - /// Prevents an attacker from front-running a commitment by taking the signature in the - /// claimInvite call and quickly committing and claiming it before the the claimer's - /// transaction succeeds. With this, frontrunning a commitment requires that an attacker - /// be able to prevent the honest claimer's claimInvite transaction from being included - /// for this long. - uint256 public constant MIN_COMMITMENT_PERIOD = 60; - - /// @notice Struct that represents a claimable invite that will be signed by the issuer. - /// @custom:field issuer Address that issued the signature. Reason this is explicitly included, - /// and not implicitly assumed to be the recovered address from the - /// signature is that the issuer may be using a ERC-1271 compatible - /// contract wallet, where the recovered address is not the same as the - /// issuer, or the signature is not an ECDSA signature at all. - /// @custom:field nonce Pseudorandom nonce to prevent replay attacks. - struct ClaimableInvite { - address issuer; - bytes32 nonce; - } - - /// @notice Maps from hashes to the timestamp when they were committed. - mapping(bytes32 => uint256) public commitmentTimestamps; - - /// @notice Maps from addresses to nonces to whether or not they have been used. 
- mapping(address => mapping(bytes32 => bool)) public usedNonces; - - /// @notice Maps from addresses to number of invites they have. - mapping(address => uint256) public inviteCounts; - - /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.1 - string public constant version = "1.1.1-beta.1"; - - /// @param _inviteGranter Address of the invite granter. - /// @param _attestationStation Address of the AttestationStation contract. - constructor(address _inviteGranter, AttestationStation _attestationStation) { - INVITE_GRANTER = _inviteGranter; - ATTESTATION_STATION = _attestationStation; - } - - /// @notice Initializes this contract, setting the EIP712 context. - /// Only update the EIP712_VERSION when there is a change to the signature scheme. - /// After the EIP712 version is changed, any signatures issued off-chain but not - /// claimed yet will no longer be accepted by the claimInvite function. Please make - /// sure to notify the issuers that they must re-issue their invite signatures. - /// @param _name Contract name. - function initialize(string memory _name) public initializer { - __EIP712_init(_name, EIP712_VERSION); - } - - /// @notice Allows invite granter to set the number of invites an address has. - /// @param _accounts An array of accounts to update the invite counts of. - /// @param _inviteCount Number of invites to set to. 
- function setInviteCounts(address[] calldata _accounts, uint256 _inviteCount) public { - // Only invite granter can grant invites - require(msg.sender == INVITE_GRANTER, "OptimistInviter: only invite granter can grant invites"); - - uint256 length = _accounts.length; - - AttestationStation.AttestationData[] memory attestations = new AttestationStation.AttestationData[](length); - - for (uint256 i; i < length;) { - // Set invite count for account to _inviteCount - inviteCounts[_accounts[i]] = _inviteCount; - - // Create an attestation for posterity that the account is allowed to create invites - attestations[i] = AttestationStation.AttestationData({ - about: _accounts[i], - key: CAN_INVITE_ATTESTATION_KEY, - val: bytes("true") - }); - - unchecked { - ++i; - } - } - - ATTESTATION_STATION.attest(attestations); - } - - /// @notice Allows anyone (but likely the claimer) to commit a received signature along with the - /// address to claim to. - /// - /// Before calling this function, the claimer should have received a signature from the - /// issuer off-chain. The claimer then calls this function with the hash of the - /// claimer's address and the received signature. This is necessary to prevent - /// front-running when the invitee is claiming the invite. Without a commit and reveal - /// scheme, anyone who is watching the mempool can take the signature being submitted - /// and front run the transaction to claim the invite to their own address. - /// - /// The same commitment can only be made once, and the function reverts if the - /// commitment has already been made. This prevents griefing where a malicious party can - /// prevent the original claimer from being able to claimInvite. - /// @param _commitment A hash of the claimer and signature concatenated. - /// keccak256(abi.encode(_claimer, _signature)) - function commitInvite(bytes32 _commitment) public { - // Check that the commitment hasn't already been made. 
This prevents griefing where - // a malicious party continuously re-submits the same commitment, preventing the original - // claimer from claiming their invite by resetting the minimum commitment period. - require(commitmentTimestamps[_commitment] == 0, "OptimistInviter: commitment already made"); - - commitmentTimestamps[_commitment] = block.timestamp; - } - - /// @notice Allows anyone to reveal a commitment and claim an invite. - /// The hash, keccak256(abi.encode(_claimer, _signature)), should have been already - /// committed using commitInvite. Before issuing the "optimist.can-mint-from-invite" - /// attestation, this function checks that - /// 1) the hash corresponding to the _claimer and the _signature was committed - /// 2) MIN_COMMITMENT_PERIOD has passed since the commitment was made. - /// 3) the _signature is signed correctly by the issuer - /// 4) the _signature hasn't already been used to claim an invite before - /// 5) the _signature issuer has not used up all of their invites - /// This function doesn't require that the _claimer is calling this function. - /// @param _claimer Address that will be granted the invite. - /// @param _claimableInvite ClaimableInvite struct containing the issuer and nonce. - /// @param _signature Signature signed over the claimable invite. - function claimInvite(address _claimer, ClaimableInvite calldata _claimableInvite, bytes memory _signature) public { - uint256 commitmentTimestamp = commitmentTimestamps[keccak256(abi.encode(_claimer, _signature))]; - - // Make sure the claimer and signature have been committed. - require(commitmentTimestamp > 0, "OptimistInviter: claimer and signature have not been committed yet"); - - // Check that MIN_COMMITMENT_PERIOD has passed since the commitment was made. - require( - commitmentTimestamp + MIN_COMMITMENT_PERIOD <= block.timestamp, - "OptimistInviter: minimum commitment period has not elapsed yet" - ); - - // Generate a EIP712 typed data hash to compare against the signature. 
- bytes32 digest = _hashTypedDataV4( - keccak256(abi.encode(CLAIMABLE_INVITE_TYPEHASH, _claimableInvite.issuer, _claimableInvite.nonce)) - ); - - // Uses SignatureChecker, which supports both regular ECDSA signatures from EOAs as well as - // ERC-1271 signatures from contract wallets or multi-sigs. This means that if the issuer - // wants to revoke a signature, they can use a smart contract wallet to issue the signature, - // then invalidate the signature after issuing it. - require( - SignatureChecker.isValidSignatureNow(_claimableInvite.issuer, digest, _signature), - "OptimistInviter: invalid signature" - ); - - // The issuer's signature commits to a nonce to prevent replay attacks. - // This checks that the nonce has not been used for this issuer before. The nonces are - // scoped to the issuer address, so the same nonce can be used by different issuers without - // clashing. - require( - usedNonces[_claimableInvite.issuer][_claimableInvite.nonce] == false, - "OptimistInviter: nonce has already been used" - ); - - // Set the nonce as used for the issuer so that it cannot be replayed. - usedNonces[_claimableInvite.issuer][_claimableInvite.nonce] = true; - - // Failing this check means that the issuer has used up all of their existing invites. - require(inviteCounts[_claimableInvite.issuer] > 0, "OptimistInviter: issuer has no invites"); - - // Reduce the issuer's invite count by 1. Can be unchecked because we check above that - // count is > 0. - unchecked { - --inviteCounts[_claimableInvite.issuer]; - } - - // Create the attestation that the claimer can mint from the issuer's invite. - // The invite issuer is included in the data of the attestation. 
- ATTESTATION_STATION.attest( - _claimer, - OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY, - abi.encode(_claimableInvite.issuer) - ); - - emit InviteClaimed(_claimableInvite.issuer, _claimer); - } -} diff --git a/packages/contracts-bedrock/src/periphery/op-nft/libraries/OptimistConstants.sol b/packages/contracts-bedrock/src/periphery/op-nft/libraries/OptimistConstants.sol deleted file mode 100644 index 225f7788949..00000000000 --- a/packages/contracts-bedrock/src/periphery/op-nft/libraries/OptimistConstants.sol +++ /dev/null @@ -1,9 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -/// @title OptimistConstants -/// @notice Library for storing Optimist related constants that are shared in multiple contracts. -library OptimistConstants { - /// @notice Attestation key issued by OptimistInviter allowing the attested account to mint. - bytes32 internal constant OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY = bytes32("optimist.can-mint-from-invite"); -} diff --git a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol index a2ab917a0d2..921330060b3 100644 --- a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol +++ b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol @@ -58,6 +58,7 @@ contract DataAvailabilityChallengeTest is CommonTest { // EntryPoint will revert if using amount > type(uint112).max. 
vm.assume(sender != Preinstalls.EntryPoint_v060); vm.assume(sender != address(dataAvailabilityChallenge)); + vm.assume(sender != deploy.mustGetAddress("DataAvailabilityChallenge")); vm.assume(sender.balance == 0); vm.deal(sender, amount); diff --git a/packages/contracts-bedrock/test/L1/DelayedVetoable.t.sol b/packages/contracts-bedrock/test/L1/DelayedVetoable.t.sol deleted file mode 100644 index a0dd2d8c138..00000000000 --- a/packages/contracts-bedrock/test/L1/DelayedVetoable.t.sol +++ /dev/null @@ -1,262 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -import { Test } from "forge-std/Test.sol"; -import { IDelayedVetoable } from "src/L1/interfaces/IDelayedVetoable.sol"; -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; - -contract DelayedVetoable_Init is Test { - error Unauthorized(address expected, address actual); - error ForwardingEarly(); - - event Initiated(bytes32 indexed callHash, bytes data); - event Forwarded(bytes32 indexed callHash, bytes data); - event Vetoed(bytes32 indexed callHash, bytes data); - - address target; - address initiator; - address vetoer; - uint256 operatingDelay = 14 days; - IDelayedVetoable delayedVetoable; - - function setUp() public { - initiator = makeAddr("initiator"); - vetoer = makeAddr("vetoer"); - target = makeAddr("target"); - vm.deal(initiator, 10000 ether); - vm.deal(vetoer, 10000 ether); - - delayedVetoable = IDelayedVetoable( - DeployUtils.create1({ - _name: "DelayedVetoable", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IDelayedVetoable.__constructor__, (vetoer, initiator, address(target), operatingDelay)) - ) - }) - ); - - // Most tests will use the operating delay, so we call as the initiator with null data - // to set the delay. For tests that need to use the initial zero delay, we'll modify the - // value in storage. 
- vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(hex""); - assertTrue(success); - } - - /// @dev This function is used to prevent initiating the delay unintentionally. - /// It should only be used on tests prior to the delay being activated. - /// @param data The data to be used in the call. - function assumeNonzeroData(bytes memory data) internal pure { - vm.assume(data.length > 0); - } - - /// @dev This function is used to ensure that the data does not clash with the queuedAt function selector. - /// @param data The data to be used in the call. - function assumeNoClash(bytes calldata data) internal pure { - if (data.length >= 4) { - vm.assume(bytes4(data[0:4]) != bytes4(keccak256("queuedAt(bytes32)"))); - } - } -} - -contract DelayedVetoable_Getters_Test is DelayedVetoable_Init { - /// @dev The getters return the expected values when called by the zero address. - function test_getters_succeeds() external { - vm.startPrank(address(0)); - assertEq(delayedVetoable.initiator(), initiator); - assertEq(delayedVetoable.vetoer(), vetoer); - assertEq(delayedVetoable.target(), target); - assertEq(delayedVetoable.delay(), operatingDelay); - assertEq(delayedVetoable.queuedAt(keccak256(abi.encode(0))), 0); - } -} - -contract DelayedVetoable_Getters_TestFail is DelayedVetoable_Init { - /// @dev Check that getter calls from unauthorized entities will revert. 
- function test_getters_notZeroAddress_reverts() external { - vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, initiator, address(this))); - delayedVetoable.initiator(); - vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, initiator, address(this))); - delayedVetoable.vetoer(); - vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, initiator, address(this))); - delayedVetoable.target(); - vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, initiator, address(this))); - delayedVetoable.delay(); - vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, initiator, address(this))); - delayedVetoable.queuedAt(keccak256(abi.encode(0))); - } -} - -contract DelayedVetoable_HandleCall_Test is DelayedVetoable_Init { - /// @dev A call can be initiated by the initiator. - function testFuzz_handleCall_initiation_succeeds(bytes calldata data) external { - assumeNoClash(data); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - emit Initiated(keccak256(data), data); - - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(data); - assertTrue(success); - } - - /// @dev The delay is inititially set to zero and the call is immediately forwarded. 
- function testFuzz_handleCall_initialForwardingImmediately_succeeds( - bytes calldata inData, - bytes calldata outData - ) - external - { - assumeNonzeroData(inData); - assumeNoClash(inData); - - // Reset the delay to zero - vm.store(address(delayedVetoable), bytes32(uint256(0)), bytes32(uint256(0))); - - vm.mockCall(target, inData, outData); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - vm.expectCall({ callee: target, data: inData }); - emit Forwarded(keccak256(inData), inData); - vm.prank(initiator); - (bool success, bytes memory returnData) = address(delayedVetoable).call(inData); - assertTrue(success); - assertEq(returnData, outData); - - // Check that the callHash is not stored for future forwarding - bytes32 callHash = keccak256(inData); - vm.prank(address(0)); - assertEq(delayedVetoable.queuedAt(callHash), 0); - } - - /// @dev Calls are not forwarded until the delay has passed. - function testFuzz_handleCall_forwardingWithDelay_succeeds(bytes calldata data) external { - assumeNonzeroData(data); - assumeNoClash(data); - - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(data); - - // Check that the call is in the _queuedAt mapping - bytes32 callHash = keccak256(data); - vm.prank(address(0)); - assertEq(delayedVetoable.queuedAt(callHash), block.timestamp); - - vm.warp(block.timestamp + operatingDelay); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - emit Forwarded(keccak256(data), data); - - vm.expectCall({ callee: target, data: data }); - (success,) = address(delayedVetoable).call(data); - assertTrue(success); - } -} - -contract DelayedVetoable_HandleCall_TestFail is DelayedVetoable_Init { - /// @dev Only the initiator can initiate a call. 
- function test_handleCall_unauthorizedInitiation_reverts() external { - vm.expectRevert(abi.encodeWithSelector(IDelayedVetoable.Unauthorized.selector, initiator, address(this))); - (bool revertsAsExpected,) = address(delayedVetoable).call(hex"00001234"); - assertTrue(revertsAsExpected); - } - - /// @dev The call cannot be forwarded until the delay has passed. - function testFuzz_handleCall_forwardingTooSoon_reverts(bytes calldata data) external { - assumeNoClash(data); - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(data); - assertTrue(success); - - vm.expectRevert(IDelayedVetoable.ForwardingEarly.selector); - (bool revertsAsExpected,) = address(delayedVetoable).call(data); - assertTrue(revertsAsExpected); - } - - /// @dev The call cannot be forwarded a second time. - function testFuzz_handleCall_forwardingTwice_reverts(bytes calldata data) external { - assumeNoClash(data); - - // Initiate the call - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(data); - assertTrue(success); - - vm.warp(block.timestamp + operatingDelay); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - emit Forwarded(keccak256(data), data); - - // Forward the call - vm.expectCall({ callee: target, data: data }); - (success,) = address(delayedVetoable).call(data); - assertTrue(success); - - // Attempt to forward the same call again. - vm.expectRevert(abi.encodeWithSelector(IDelayedVetoable.Unauthorized.selector, initiator, address(this))); - (bool revertsAsExpected,) = address(delayedVetoable).call(data); - assertTrue(revertsAsExpected); - } - - /// @dev If the target reverts, it is bubbled up. 
- function testFuzz_handleCall_forwardingTargetReverts_reverts( - bytes calldata inData, - bytes calldata outData - ) - external - { - assumeNoClash(inData); - - // Initiate the call - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(inData); - assertTrue(success); - - vm.warp(block.timestamp + operatingDelay); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - emit Forwarded(keccak256(inData), inData); - - vm.mockCallRevert(target, inData, outData); - - // Forward the call - vm.expectRevert(outData); - (bool revertsAsExpected,) = address(delayedVetoable).call(inData); - assertTrue(revertsAsExpected); - } - - function testFuzz_handleCall_forwardingTargetRetValue_succeeds( - bytes calldata inData, - bytes calldata outData - ) - external - { - assumeNoClash(inData); - - // Initiate the call - vm.prank(initiator); - (bool success,) = address(delayedVetoable).call(inData); - assertTrue(success); - - vm.warp(block.timestamp + operatingDelay); - vm.expectEmit(true, false, false, true, address(delayedVetoable)); - emit Forwarded(keccak256(inData), inData); - - vm.mockCall(target, inData, outData); - - // Forward the call - (bool success2, bytes memory retData) = address(delayedVetoable).call(inData); - assertTrue(success2); - assertEq(keccak256(retData), keccak256(outData)); - } - - /// @dev A test documenting the single instance in which the contract is not 'transparent' to the initiator. - function testFuzz_handleCall_queuedAtClash_reverts() external { - // This will get us calldata with the same function selector as the queuedAt function, but - // with the incorrect input data length. 
- bytes memory inData = abi.encodePacked(keccak256("queuedAt(bytes32)")); - - // Reset the delay to zero - vm.store(address(delayedVetoable), bytes32(uint256(0)), bytes32(uint256(0))); - - vm.prank(initiator); - vm.expectRevert(bytes("")); - (bool revertsAsExpected,) = address(delayedVetoable).call(inData); - assertTrue(revertsAsExpected); - } -} diff --git a/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol index 23c1365e915..22345d860e4 100644 --- a/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol @@ -4,6 +4,7 @@ pragma solidity 0.8.15; // Testing utilities import { CommonTest } from "test/setup/CommonTest.sol"; import { Reverter } from "test/mocks/Callers.sol"; +import { stdError } from "forge-std/StdError.sol"; // Libraries import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; @@ -173,21 +174,93 @@ contract L1CrossDomainMessenger_Test is CommonTest { assertEq(l1CrossDomainMessenger.failedMessages(hash), false); } - /// @dev Tests that relayMessage reverts if attempting to relay a message - /// sent to an L1 system contract. - function test_relayMessage_toSystemContract_reverts() external { - // set the target to be the OptimismPortal - address target = address(optimismPortal); + /// @dev Tests that relayMessage reverts if caller is optimismPortal and the value sent does not match the amount + function test_relayMessage_fromOtherMessengerValueMismatch_reverts() external { + address target = alice; address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; bytes memory message = hex"1111"; + // set the value of op.l2Sender() to be the L2CrossDomainMessenger. 
+ vm.store(address(optimismPortal), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + // correctly sending as OptimismPortal but amount does not match msg.value + vm.deal(address(optimismPortal), 10 ether); + vm.prank(address(optimismPortal)); + vm.expectRevert(stdError.assertionError); + l1CrossDomainMessenger.relayMessage{ value: 10 ether }( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 9 ether, 0, message + ); + } + + /// @dev Tests that relayMessage reverts if a failed message is attempted to be replayed via the optimismPortal + function test_relayMessage_fromOtherMessengerFailedMessageReplay_reverts() external { + address target = alice; + address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + bytes memory message = hex"1111"; + + // set the value of op.l2Sender() to be the L2 Cross Domain Messenger. + vm.store(address(optimismPortal), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + // make a failed message + vm.etch(target, hex"fe"); vm.prank(address(optimismPortal)); - vm.expectRevert("CrossDomainMessenger: message cannot be replayed"); l1CrossDomainMessenger.relayMessage( Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message ); - vm.store(address(optimismPortal), 0, bytes32(abi.encode(sender))); + // cannot replay messages when optimism portal is msg.sender + vm.prank(address(optimismPortal)); + vm.expectRevert(stdError.assertionError); + l1CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message + ); + } + + /// @dev Tests that relayMessage reverts if attempting to relay a message + /// with l1CrossDomainMessenger as the target + function test_relayMessage_toSelf_reverts() external { + address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + 
vm.prank(address(optimismPortal)); + vm.expectRevert("CrossDomainMessenger: cannot send message to blocked system address"); + l1CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), + sender, + address(l1CrossDomainMessenger), + 0, + 0, + message + ); + } + + /// @dev Tests that relayMessage reverts if attempting to relay a message + /// with optimismPortal as the target + function test_relayMessage_toOptimismPortal_reverts() external { + address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + vm.prank(address(optimismPortal)); + vm.expectRevert("CrossDomainMessenger: cannot send message to blocked system address"); + l1CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, address(optimismPortal), 0, 0, message + ); + } + + /// @dev Tests that the relayMessage function reverts if the message called by non-optimismPortal but not a failed + /// message + function test_relayMessage_relayingNewMessageByExternalUser_reverts() external { + address target = address(alice); + address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + vm.prank(bob); vm.expectRevert("CrossDomainMessenger: message cannot be replayed"); l1CrossDomainMessenger.relayMessage( Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 5b2260fce99..e828b415621 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -14,9 +14,12 @@ import { IProtocolVersions } from 
"src/L1/interfaces/IProtocolVersions.sol"; contract OPContractsManager_Harness is OPContractsManager { constructor( ISuperchainConfig _superchainConfig, - IProtocolVersions _protocolVersions + IProtocolVersions _protocolVersions, + string memory _l1ContractsRelease, + Blueprints memory _blueprints, + Implementations memory _implementations ) - OPContractsManager(_superchainConfig, _protocolVersions) + OPContractsManager(_superchainConfig, _protocolVersions, _l1ContractsRelease, _blueprints, _implementations) { } function chainIdToBatchInboxAddress_exposed(uint256 l2ChainId) public pure returns (address) { @@ -49,7 +52,7 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { doi.set(doi.basefeeScalar.selector, basefeeScalar); doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opcmProxy.selector, address(opcm)); + doi.set(doi.opcm.selector, address(opcm)); doi.set(doi.gasLimit.selector, gasLimit); doi.set(doi.disputeGameType.selector, disputeGameType); @@ -116,12 +119,17 @@ contract OPContractsManager_InternalMethods_Test is Test { function setUp() public { ISuperchainConfig superchainConfigProxy = ISuperchainConfig(makeAddr("superchainConfig")); IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersions")); + OPContractsManager.Blueprints memory emptyBlueprints; + OPContractsManager.Implementations memory emptyImpls; vm.etch(address(superchainConfigProxy), hex"01"); vm.etch(address(protocolVersionsProxy), hex"01"); opcmHarness = new OPContractsManager_Harness({ _superchainConfig: superchainConfigProxy, - _protocolVersions: protocolVersionsProxy + _protocolVersions: protocolVersionsProxy, + _l1ContractsRelease: "dev", + _blueprints: emptyBlueprints, + _implementations: emptyImpls }); } diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol index f19e7ca6f5c..8172fb14b79 100644 --- 
a/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal.t.sol @@ -1006,7 +1006,10 @@ contract OptimismPortal_FinalizeWithdrawal_Test is CommonTest { ) ); - uint256 bobBalanceBefore = address(bob).balance; + // Fund the portal so that we can withdraw ETH. + vm.store(address(optimismPortal), bytes32(uint256(61)), bytes32(uint256(0xFFFFFFFF))); + vm.deal(address(optimismPortal), 0xFFFFFFFF); + uint256 bobBalanceBefore = bob.balance; vm.expectEmit(true, true, true, true); emit WithdrawalProven(_withdrawalHash_noData, alice, bob); @@ -1019,7 +1022,69 @@ contract OptimismPortal_FinalizeWithdrawal_Test is CommonTest { emit WithdrawalFinalized(_withdrawalHash_noData, true); optimismPortal.finalizeWithdrawalTransaction(_defaultTx_noData); - assertEq(address(bob).balance, bobBalanceBefore + 100); + assertEq(bob.balance, bobBalanceBefore + 100); + } + + /// @dev Tests that `finalizeWithdrawalTransaction` succeeds when _tx.data is empty and with a custom gas token. + function test_finalizeWithdrawalTransaction_noTxDataNonEtherGasToken_succeeds() external { + Types.WithdrawalTransaction memory _defaultTx_noData = Types.WithdrawalTransaction({ + nonce: 0, + sender: alice, + target: bob, + value: 100, + gasLimit: 100_000, + data: hex"" + }); + // Get withdrawal proof data we can use for testing. + ( + bytes32 _stateRoot_noData, + bytes32 _storageRoot_noData, + bytes32 _outputRoot_noData, + bytes32 _withdrawalHash_noData, + bytes[] memory _withdrawalProof_noData + ) = ffi.getProveWithdrawalTransactionInputs(_defaultTx_noData); + // Setup a dummy output root proof for reuse. + Types.OutputRootProof memory _outputRootProof_noData = Types.OutputRootProof({ + version: bytes32(uint256(0)), + stateRoot: _stateRoot_noData, + messagePasserStorageRoot: _storageRoot_noData, + latestBlockhash: bytes32(uint256(0)) + }); + + // Configure the oracle to return the output root we've prepared. 
+ vm.mockCall( + address(l2OutputOracle), + abi.encodePacked(IL2OutputOracle.getL2Output.selector), + abi.encode( + Types.OutputProposal( + _outputRoot_noData, + l2OutputOracle.getL2Output(_proposedOutputIndex).timestamp, + uint128(_proposedBlockNumber) + ) + ) + ); + + // Fund the portal so that we can withdraw ETH. + vm.store(address(optimismPortal), bytes32(uint256(61)), bytes32(uint256(0xFFFFFFFF))); + deal(address(L1Token), address(optimismPortal), 0xFFFFFFFF); + // modify the gas token to be non ether + vm.mockCall( + address(systemConfig), abi.encodeCall(systemConfig.gasPayingToken, ()), abi.encode(address(L1Token), 18) + ); + uint256 bobBalanceBefore = L1Token.balanceOf(bob); + + vm.expectEmit(true, true, true, true); + emit WithdrawalProven(_withdrawalHash_noData, alice, bob); + optimismPortal.proveWithdrawalTransaction( + _defaultTx_noData, _proposedOutputIndex, _outputRootProof_noData, _withdrawalProof_noData + ); + + vm.warp(block.timestamp + l2OutputOracle.FINALIZATION_PERIOD_SECONDS() + 1); + vm.expectEmit(true, true, false, true); + emit WithdrawalFinalized(_withdrawalHash_noData, true); + optimismPortal.finalizeWithdrawalTransaction(_defaultTx_noData); + + assertEq(L1Token.balanceOf(bob), bobBalanceBefore + 100); } /// @dev Tests that `finalizeWithdrawalTransaction` reverts if the finalization period diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol index d252609e5ee..083644755d3 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol @@ -802,6 +802,169 @@ contract OptimismPortal2_FinalizeWithdrawal_Test is CommonTest { assert(address(bob).balance == bobBalanceBefore + 100); } + /// @dev Tests that `finalizeWithdrawalTransaction` reverts if the target reverts and caller is the + /// ESTIMATION_ADDRESS. 
+ function test_finalizeWithdrawalTransaction_targetFailsAndCallerIsEstimationAddress_reverts() external { + vm.etch(bob, hex"fe"); // Contract with just the invalid opcode. + + vm.prank(alice); + vm.expectEmit(true, true, true, true); + emit WithdrawalProven(_withdrawalHash, alice, bob); + optimismPortal2.proveWithdrawalTransaction(_defaultTx, _proposedGameIndex, _outputRootProof, _withdrawalProof); + + // Warp and resolve the dispute game. + game.resolveClaim(0, 0); + game.resolve(); + vm.warp(block.timestamp + optimismPortal2.proofMaturityDelaySeconds() + 1 seconds); + + vm.startPrank(alice, Constants.ESTIMATION_ADDRESS); + vm.expectRevert(GasEstimation.selector); + optimismPortal2.finalizeWithdrawalTransaction(_defaultTx); + } + + /// @dev Tests that `finalizeWithdrawalTransaction` succeeds when _tx.data is empty. + function test_finalizeWithdrawalTransaction_noTxData_succeeds() external { + Types.WithdrawalTransaction memory _defaultTx_noData = Types.WithdrawalTransaction({ + nonce: 0, + sender: alice, + target: bob, + value: 100, + gasLimit: 100_000, + data: hex"" + }); + // Get withdrawal proof data we can use for testing. + ( + bytes32 _stateRoot_noData, + bytes32 _storageRoot_noData, + bytes32 _outputRoot_noData, + bytes32 _withdrawalHash_noData, + bytes[] memory _withdrawalProof_noData + ) = ffi.getProveWithdrawalTransactionInputs(_defaultTx_noData); + // Setup a dummy output root proof for reuse. 
+ Types.OutputRootProof memory _outputRootProof_noData = Types.OutputRootProof({ + version: bytes32(uint256(0)), + stateRoot: _stateRoot_noData, + messagePasserStorageRoot: _storageRoot_noData, + latestBlockhash: bytes32(uint256(0)) + }); + uint256 _proposedBlockNumber_noData = 0xFF; + IFaultDisputeGame game_noData = IFaultDisputeGame( + payable( + address( + disputeGameFactory.create( + optimismPortal2.respectedGameType(), + Claim.wrap(_outputRoot_noData), + abi.encode(_proposedBlockNumber_noData) + ) + ) + ) + ); + uint256 _proposedGameIndex_noData = disputeGameFactory.gameCount() - 1; + // Warp beyond the chess clocks and finalize the game. + vm.warp(block.timestamp + game_noData.maxClockDuration().raw() + 1 seconds); + // Fund the portal so that we can withdraw ETH. + vm.store(address(optimismPortal2), bytes32(uint256(61)), bytes32(uint256(0xFFFFFFFF))); + vm.deal(address(optimismPortal2), 0xFFFFFFFF); + + uint256 bobBalanceBefore = bob.balance; + + vm.expectEmit(address(optimismPortal2)); + emit WithdrawalProven(_withdrawalHash_noData, alice, bob); + vm.expectEmit(address(optimismPortal2)); + emit WithdrawalProvenExtension1(_withdrawalHash_noData, address(this)); + optimismPortal2.proveWithdrawalTransaction({ + _tx: _defaultTx_noData, + _disputeGameIndex: _proposedGameIndex_noData, + _outputRootProof: _outputRootProof_noData, + _withdrawalProof: _withdrawalProof_noData + }); + + // Warp and resolve the dispute game. + game_noData.resolveClaim(0, 0); + game_noData.resolve(); + vm.warp(block.timestamp + optimismPortal2.proofMaturityDelaySeconds() + 1 seconds); + + vm.expectEmit(true, true, false, true); + emit WithdrawalFinalized(_withdrawalHash_noData, true); + optimismPortal2.finalizeWithdrawalTransaction(_defaultTx_noData); + + assert(bob.balance == bobBalanceBefore + 100); + } + + /// @dev Tests that `finalizeWithdrawalTransaction` succeeds when _tx.data is empty and with a custom gas token. 
+ function test_finalizeWithdrawalTransaction_noTxDataNonEtherGasToken_succeeds() external { + Types.WithdrawalTransaction memory _defaultTx_noData = Types.WithdrawalTransaction({ + nonce: 0, + sender: alice, + target: bob, + value: 100, + gasLimit: 100_000, + data: hex"" + }); + // Get withdrawal proof data we can use for testing. + ( + bytes32 _stateRoot_noData, + bytes32 _storageRoot_noData, + bytes32 _outputRoot_noData, + bytes32 _withdrawalHash_noData, + bytes[] memory _withdrawalProof_noData + ) = ffi.getProveWithdrawalTransactionInputs(_defaultTx_noData); + // Setup a dummy output root proof for reuse. + Types.OutputRootProof memory _outputRootProof_noData = Types.OutputRootProof({ + version: bytes32(uint256(0)), + stateRoot: _stateRoot_noData, + messagePasserStorageRoot: _storageRoot_noData, + latestBlockhash: bytes32(uint256(0)) + }); + uint256 _proposedBlockNumber_noData = 0xFF; + IFaultDisputeGame game_noData = IFaultDisputeGame( + payable( + address( + disputeGameFactory.create( + optimismPortal2.respectedGameType(), + Claim.wrap(_outputRoot_noData), + abi.encode(_proposedBlockNumber_noData) + ) + ) + ) + ); + uint256 _proposedGameIndex_noData = disputeGameFactory.gameCount() - 1; + // Warp beyond the chess clocks and finalize the game. + vm.warp(block.timestamp + game_noData.maxClockDuration().raw() + 1 seconds); + // Fund the portal so that we can withdraw ETH. 
+ vm.store(address(optimismPortal2), bytes32(uint256(61)), bytes32(uint256(0xFFFFFFFF))); + deal(address(L1Token), address(optimismPortal2), 0xFFFFFFFF); + + // modify the gas token to be non ether + vm.mockCall( + address(systemConfig), abi.encodeCall(systemConfig.gasPayingToken, ()), abi.encode(address(L1Token), 18) + ); + + uint256 bobBalanceBefore = L1Token.balanceOf(bob); + + vm.expectEmit(address(optimismPortal2)); + emit WithdrawalProven(_withdrawalHash_noData, alice, bob); + vm.expectEmit(address(optimismPortal2)); + emit WithdrawalProvenExtension1(_withdrawalHash_noData, address(this)); + optimismPortal2.proveWithdrawalTransaction({ + _tx: _defaultTx_noData, + _disputeGameIndex: _proposedGameIndex_noData, + _outputRootProof: _outputRootProof_noData, + _withdrawalProof: _withdrawalProof_noData + }); + + // Warp and resolve the dispute game. + game_noData.resolveClaim(0, 0); + game_noData.resolve(); + vm.warp(block.timestamp + optimismPortal2.proofMaturityDelaySeconds() + 1 seconds); + + vm.expectEmit(true, true, false, true); + emit WithdrawalFinalized(_withdrawalHash_noData, true); + optimismPortal2.finalizeWithdrawalTransaction(_defaultTx_noData); + + assert(L1Token.balanceOf(bob) == bobBalanceBefore + 100); + } + /// @dev Tests that `finalizeWithdrawalTransaction` succeeds. 
function test_finalizeWithdrawalTransaction_provenWithdrawalHashEther_succeeds() external { uint256 bobBalanceBefore = address(bob).balance; @@ -1575,7 +1738,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal ); // Deposit the token into the portal - optimismPortal.depositERC20Transaction(_to, _mint, _value, _gasLimit, _isCreation, _data); + optimismPortal2.depositERC20Transaction(_to, _mint, _value, _gasLimit, _isCreation, _data); // Assert final balance equals the deposited amount assertEq(token.balanceOf(address(optimismPortal2)), _mint); @@ -1657,7 +1820,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal ); // Mock the token balance - vm.mockCall(address(token), abi.encodeCall(token.balanceOf, (address(optimismPortal))), abi.encode(0)); + vm.mockCall(address(token), abi.encodeCall(token.balanceOf, (address(optimismPortal2))), abi.encode(0)); // Call minimumGasLimit(0) before vm.expectRevert to ensure vm.expectRevert is for depositERC20Transaction uint64 gasLimit = optimismPortal2.minimumGasLimit(0); @@ -1723,7 +1886,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal ); // Deposit the token into the portal - optimismPortal2.depositERC20Transaction(address(0), _amount, 0, optimismPortal.minimumGasLimit(0), false, ""); + optimismPortal2.depositERC20Transaction(address(0), _amount, 0, optimismPortal2.minimumGasLimit(0), false, ""); // Check that the balance has been correctly updated assertEq(optimismPortal2.balance(), _amount); @@ -1742,7 +1905,7 @@ contract OptimismPortal2WithMockERC20_Test is OptimismPortal2_FinalizeWithdrawal // Deposit the token into the portal optimismPortal2.depositERC20Transaction( - address(bob), _defaultTx.value, 0, optimismPortal.minimumGasLimit(0), false, "" + address(bob), _defaultTx.value, 0, optimismPortal2.minimumGasLimit(0), false, "" ); assertEq(optimismPortal2.balance(), _defaultTx.value); diff --git 
a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol index a6311c02b1e..fd5fd296f8f 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol @@ -255,6 +255,19 @@ contract SystemConfig_Init_ResourceConfig is SystemConfig_Init { _initializeWithResourceConfig(config, "SystemConfig: gas limit too low"); } + /// @dev Tests that `setResourceConfig` reverts if the gas limit is too low. + function test_setResourceConfig_elasticityMultiplierIs0_reverts() external { + IResourceMetering.ResourceConfig memory config = IResourceMetering.ResourceConfig({ + maxResourceLimit: 20_000_000, + elasticityMultiplier: 0, + baseFeeMaxChangeDenominator: 8, + systemTxMaxGas: 1_000_000, + minimumBaseFee: 1 gwei, + maximumBaseFee: 2 gwei + }); + _initializeWithResourceConfig(config, "SystemConfig: elasticity multiplier cannot be 0"); + } + /// @dev Tests that `setResourceConfig` reverts if the elasticity multiplier /// and max resource limit are configured such that there is a loss of precision. function test_setResourceConfig_badPrecision_reverts() external { diff --git a/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol b/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol index 426dba30c72..78337fc3b4d 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfigInterop.t.sol @@ -26,6 +26,19 @@ contract SystemConfigInterop_Test is CommonTest { super.setUp(); } + /// @dev Tests that when the decimals is not 18, initialization reverts. 
+ function test_initialize_decimalsIsNot18_reverts(uint8 decimals) external { + vm.assume(decimals != 18); + address _token = address(L1Token); + + vm.mockCall(_token, abi.encodeCall(ERC20.name, ()), abi.encode("Token")); + vm.mockCall(_token, abi.encodeCall(ERC20.symbol, ()), abi.encode("TKN")); + vm.mockCall(_token, abi.encodeCall(ERC20.decimals, ()), abi.encode(decimals)); + + vm.expectRevert("SystemConfig: bad decimals of gas paying token"); + _cleanStorageAndInit(_token); + } + /// @dev Tests that the gas paying token can be set. function testFuzz_setGasPayingToken_succeeds( address _token, diff --git a/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol index 131851783c7..f5ef2d63559 100644 --- a/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol @@ -5,6 +5,7 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; import { Reverter } from "test/mocks/Callers.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; +import { stdError } from "forge-std/StdError.sol"; // Libraries import { Hashing } from "src/libraries/Hashing.sol"; @@ -148,17 +149,103 @@ contract L2CrossDomainMessenger_Test is CommonTest { assertEq(l2CrossDomainMessenger.failedMessages(hash), false); } - /// @dev Tests that `relayMessage` reverts if attempting to relay - /// a message sent to an L1 system contract. 
- function test_relayMessage_toSystemContract_reverts() external { - address target = address(l2ToL1MessagePasser); + /// @dev Tests that relayMessage reverts if the value sent does not match the amount + function test_relayMessage_fromOtherMessengerValueMismatch_reverts() external { + // set the target to be alice + address target = alice; address sender = address(l1CrossDomainMessenger); address caller = AddressAliasHelper.applyL1ToL2Alias(address(l1CrossDomainMessenger)); bytes memory message = hex"1111"; + // cannot send a message where the amount inputted does not match the msg.value + vm.deal(caller, 10 ether); vm.prank(caller); + vm.expectRevert(stdError.assertionError); + l2CrossDomainMessenger.relayMessage{ value: 10 ether }( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 9 ether, 0, message + ); + } + + /// @dev Tests that relayMessage reverts if a failed message is attempted to be replayed and the caller is the other + /// messenger + function test_relayMessage_fromOtherMessengerFailedMessageReplay_reverts() external { + // set the target to be alice + address target = alice; + address sender = address(l1CrossDomainMessenger); + address caller = AddressAliasHelper.applyL1ToL2Alias(address(l1CrossDomainMessenger)); + bytes memory message = hex"1111"; + + // make a failed message + vm.etch(target, hex"fe"); + vm.prank(caller); + l2CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message + ); + + // cannot replay messages when the caller is the other messenger + vm.prank(caller); + vm.expectRevert(stdError.assertionError); + l2CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, message + ); + } + + /// @dev Tests that relayMessage reverts if attempting to relay a message + /// sent to self + function test_relayMessage_toSelf_reverts() external { + address sender = 
address(l1CrossDomainMessenger); + address caller = AddressAliasHelper.applyL1ToL2Alias(address(l1CrossDomainMessenger)); + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(0), bytes32(abi.encode(sender))); + + vm.prank(caller); + vm.expectRevert("CrossDomainMessenger: cannot send message to blocked system address"); + l2CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), + sender, + address(l2CrossDomainMessenger), + 0, + 0, + message + ); + } + + /// @dev Tests that relayMessage reverts if attempting to relay a message + /// sent to the l2ToL1MessagePasser address + function test_relayMessage_toL2ToL1MessagePasser_reverts() external { + address sender = address(l1CrossDomainMessenger); + address caller = AddressAliasHelper.applyL1ToL2Alias(address(l1CrossDomainMessenger)); + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(0), bytes32(abi.encode(sender))); + + vm.prank(caller); + vm.expectRevert("CrossDomainMessenger: cannot send message to blocked system address"); + l2CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), + sender, + address(l2ToL1MessagePasser), + 0, + 0, + message + ); + } + + /// @dev Tests that the relayMessage function reverts if the message called by non-optimismPortal but not a failed + /// message + function test_relayMessage_relayingNewMessageByExternalUser_reverts() external { + address target = address(alice); + address sender = address(l1CrossDomainMessenger); + bytes memory message = hex"1111"; + + vm.store(address(optimismPortal), bytes32(0), bytes32(abi.encode(sender))); + + vm.prank(bob); vm.expectRevert("CrossDomainMessenger: message cannot be replayed"); - l1CrossDomainMessenger.relayMessage(Encoding.encodeVersionedNonce(0, 1), sender, target, 0, 0, message); + l2CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, target, 0, 0, 
message + ); } /// @dev Tests that `relayMessage` correctly resets the `xDomainMessageSender` diff --git a/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol b/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol index 6d7a9bea513..1cbaf0c1eb2 100644 --- a/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol +++ b/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol @@ -64,6 +64,110 @@ contract DelayedWETH_Unlock_Test is DelayedWETH_Init { } contract DelayedWETH_Withdraw_Test is DelayedWETH_Init { + /// @dev Tests that withdrawing while unlocked and delay has passed is successful. + function test_withdraw_whileUnlocked_succeeds() public { + // Deposit some WETH. + vm.prank(alice); + delayedWeth.deposit{ value: 1 ether }(); + uint256 balance = address(alice).balance; + + // Unlock the withdrawal. + vm.prank(alice); + delayedWeth.unlock(alice, 1 ether); + + // Wait for the delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1); + + // Withdraw the WETH. + vm.expectEmit(true, true, false, false); + emit Withdrawal(address(alice), 1 ether); + vm.prank(alice); + delayedWeth.withdraw(1 ether); + assertEq(address(alice).balance, balance + 1 ether); + } + + /// @dev Tests that withdrawing when unlock was not called fails. + function test_withdraw_whileLocked_fails() public { + // Deposit some WETH. + vm.prank(alice); + delayedWeth.deposit{ value: 1 ether }(); + uint256 balance = address(alice).balance; + + // Withdraw fails when unlock not called. + vm.expectRevert("DelayedWETH: withdrawal not unlocked"); + vm.prank(alice); + delayedWeth.withdraw(0 ether); + assertEq(address(alice).balance, balance); + } + + /// @dev Tests that withdrawing while locked and delay has not passed fails. + function test_withdraw_whileLockedNotLongEnough_fails() public { + // Deposit some WETH. + vm.prank(alice); + delayedWeth.deposit{ value: 1 ether }(); + uint256 balance = address(alice).balance; + + // Call unlock. 
+ vm.prank(alice); + delayedWeth.unlock(alice, 1 ether); + + // Wait for the delay, but not long enough. + vm.warp(block.timestamp + delayedWeth.delay() - 1); + + // Withdraw fails when delay not met. + vm.expectRevert("DelayedWETH: withdrawal delay not met"); + vm.prank(alice); + delayedWeth.withdraw(1 ether); + assertEq(address(alice).balance, balance); + } + + /// @dev Tests that withdrawing more than unlocked amount fails. + function test_withdraw_tooMuch_fails() public { + // Deposit some WETH. + vm.prank(alice); + delayedWeth.deposit{ value: 1 ether }(); + uint256 balance = address(alice).balance; + + // Unlock the withdrawal. + vm.prank(alice); + delayedWeth.unlock(alice, 1 ether); + + // Wait for the delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1); + + // Withdraw too much fails. + vm.expectRevert("DelayedWETH: insufficient unlocked withdrawal"); + vm.prank(alice); + delayedWeth.withdraw(2 ether); + assertEq(address(alice).balance, balance); + } + + /// @dev Tests that withdrawing while paused fails. + function test_withdraw_whenPaused_fails() public { + // Deposit some WETH. + vm.prank(alice); + delayedWeth.deposit{ value: 1 ether }(); + + // Unlock the withdrawal. + vm.prank(alice); + delayedWeth.unlock(alice, 1 ether); + + // Wait for the delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1); + + // Pause the contract. + address guardian = optimismPortal.guardian(); + vm.prank(guardian); + superchainConfig.pause("identifier"); + + // Withdraw fails. + vm.expectRevert("DelayedWETH: contract is paused"); + vm.prank(alice); + delayedWeth.withdraw(1 ether); + } +} + +contract DelayedWETH_WithdrawFrom_Test is DelayedWETH_Init { /// @dev Tests that withdrawing while unlocked and delay has passed is successful. function test_withdraw_whileUnlocked_succeeds() public { // Deposit some WETH. 
diff --git a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol index 88e361a80b4..70aad007e40 100644 --- a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol @@ -449,6 +449,17 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { gameProxy.initialize(); } + /// @dev Tests that startingOutputRoot and it's getters are set correctly. + function test_startingOutputRootGetters_succeeds() public view { + (Hash root, uint256 l2BlockNumber) = gameProxy.startingOutputRoot(); + (Hash anchorRoot, uint256 anchorRootBlockNumber) = anchorStateRegistry.anchors(GAME_TYPE); + + assertEq(gameProxy.startingBlockNumber(), l2BlockNumber); + assertEq(gameProxy.startingBlockNumber(), anchorRootBlockNumber); + assertEq(Hash.unwrap(gameProxy.startingRootHash()), Hash.unwrap(root)); + assertEq(Hash.unwrap(gameProxy.startingRootHash()), Hash.unwrap(anchorRoot)); + } + /// @dev Tests that the user cannot control the first 4 bytes of the CWIA data, disallowing them to control the /// entrypoint when no calldata is provided to a call. function test_cwiaCalldata_userCannotControlSelector_succeeds() public { @@ -1986,6 +1997,18 @@ contract FaultDisputeGame_Test is FaultDisputeGame_Init { assertEq(datLen, expectedLen); } + /// @dev Tests that if the game is not in progress, querying of `getChallengerDuration` reverts + function test_getChallengerDuration_gameNotInProgress_reverts() public { + // resolve the game + vm.warp(block.timestamp + gameProxy.maxClockDuration().raw()); + + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + vm.expectRevert(GameNotInProgress.selector); + gameProxy.getChallengerDuration(1); + } + /// @dev Static unit test asserting that resolveClaim isn't possible if there's time /// left for a counter. 
function test_resolution_lastSecondDisputes_succeeds() public { diff --git a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol index 698d8a0bd1f..99b70b9c6df 100644 --- a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol @@ -198,6 +198,61 @@ contract PermissionedDisputeGame_Test is PermissionedDisputeGame_Init { vm.stopPrank(); } + /// @dev Tests that step works properly. + function test_step_succeeds() public { + // Give the test contract some ether + vm.deal(CHALLENGER, 1_000 ether); + + vm.startPrank(CHALLENGER, CHALLENGER); + + // Make claims all the way down the tree. + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(2); + gameProxy.attack{ value: _getRequiredBond(2) }(disputed, 2, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(3); + gameProxy.attack{ value: _getRequiredBond(3) }(disputed, 3, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(4); + gameProxy.attack{ value: _getRequiredBond(4) }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC)); + (,,,, disputed,,) = gameProxy.claimData(5); + gameProxy.attack{ value: _getRequiredBond(5) }(disputed, 5, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(6); + gameProxy.attack{ value: _getRequiredBond(6) }(disputed, 6, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(7); + gameProxy.attack{ value: _getRequiredBond(7) }(disputed, 7, _dummyClaim()); + + // Verify game state before step + assertEq(uint256(gameProxy.status()), uint256(GameStatus.IN_PROGRESS)); + + gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0); + 
gameProxy.step(8, true, absolutePrestateData, hex""); + + vm.warp(block.timestamp + gameProxy.maxClockDuration().raw() + 1); + gameProxy.resolveClaim(8, 0); + gameProxy.resolveClaim(7, 0); + gameProxy.resolveClaim(6, 0); + gameProxy.resolveClaim(5, 0); + gameProxy.resolveClaim(4, 0); + gameProxy.resolveClaim(3, 0); + gameProxy.resolveClaim(2, 0); + gameProxy.resolveClaim(1, 0); + + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + assertEq(uint256(gameProxy.status()), uint256(GameStatus.CHALLENGER_WINS)); + assertEq(gameProxy.resolvedAt().raw(), block.timestamp); + (, address counteredBy,,,,,) = gameProxy.claimData(0); + assertEq(counteredBy, CHALLENGER); + } + + /// @dev Helper to return a pseudo-random claim + function _dummyClaim() internal view returns (Claim) { + return Claim.wrap(keccak256(abi.encode(gasleft()))); + } + /// @dev Helper to get the required bond for the given claim index. function _getRequiredBond(uint256 _claimIndex) internal view returns (uint256 bond_) { (,,,,, Position parent,) = gameProxy.claimData(_claimIndex); diff --git a/packages/contracts-bedrock/test/libraries/Encoding.t.sol b/packages/contracts-bedrock/test/libraries/Encoding.t.sol index a301fdd97b3..277cce328dc 100644 --- a/packages/contracts-bedrock/test/libraries/Encoding.t.sol +++ b/packages/contracts-bedrock/test/libraries/Encoding.t.sol @@ -71,6 +71,18 @@ contract Encoding_Test is CommonTest { assertEq(legacyEncoding, bedrockEncoding); } + /// @dev Tests that encodeCrossDomainMessage reverts if version is greater than 1. 
+ function testFuzz_encodeCrossDomainMessage_versionGreaterThanOne_reverts(uint256 nonce) external { + // nonce >> 240 must be greater than 1 + uint256 minInvalidNonce = (uint256(type(uint240).max) + 1) * 2; + nonce = bound(nonce, minInvalidNonce, type(uint256).max); + + EncodingContract encoding = new EncodingContract(); + + vm.expectRevert(bytes("Encoding: unknown cross domain message version")); + encoding.encodeCrossDomainMessage(nonce, address(this), address(this), 1, 100, hex""); + } + /// @dev Tests deposit transaction encoding. function testDiff_encodeDepositTransaction_succeeds( address _from, @@ -94,3 +106,20 @@ contract Encoding_Test is CommonTest { assertEq(txn, _txn); } } + +contract EncodingContract { + function encodeCrossDomainMessage( + uint256 nonce, + address sender, + address target, + uint256 value, + uint256 gasLimit, + bytes memory data + ) + external + pure + returns (bytes memory) + { + return Encoding.encodeCrossDomainMessage(nonce, sender, target, value, gasLimit, data); + } +} diff --git a/packages/contracts-bedrock/test/mocks/OptimistInviterHelper.sol b/packages/contracts-bedrock/test/mocks/OptimistInviterHelper.sol deleted file mode 100644 index ebc2289f9c1..00000000000 --- a/packages/contracts-bedrock/test/mocks/OptimistInviterHelper.sol +++ /dev/null @@ -1,94 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -import { OptimistInviter } from "src/periphery/op-nft/OptimistInviter.sol"; -import { ECDSA } from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; - -/// @notice Simple helper contract that helps with testing flow and signature for -/// OptimistInviter contract. Made this a separate contract instead of including -/// in OptimistInviter.t.sol for reusability. -contract OptimistInviterHelper { - /// @notice EIP712 typehash for the ClaimableInvite type. 
- bytes32 public constant CLAIMABLE_INVITE_TYPEHASH = keccak256("ClaimableInvite(address issuer,bytes32 nonce)"); - - /// @notice EIP712 typehash for the EIP712Domain type that is included as part of the signature. - bytes32 public constant EIP712_DOMAIN_TYPEHASH = - keccak256("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)"); - - /// @notice Address of OptimistInviter contract we are testing. - OptimistInviter public optimistInviter; - - /// @notice OptimistInviter contract name. Used to construct the EIP-712 domain. - string public name; - - /// @notice Keeps track of current nonce to generate new nonces for each invite. - uint256 public currentNonce; - - constructor(OptimistInviter _optimistInviter, string memory _name) { - optimistInviter = _optimistInviter; - name = _name; - } - - /// @notice Returns the hash of the struct ClaimableInvite. - /// @param _claimableInvite ClaimableInvite struct to hash. - /// @return EIP-712 typed struct hash. - function getClaimableInviteStructHash(OptimistInviter.ClaimableInvite memory _claimableInvite) - public - pure - returns (bytes32) - { - return keccak256(abi.encode(CLAIMABLE_INVITE_TYPEHASH, _claimableInvite.issuer, _claimableInvite.nonce)); - } - - /// @notice Returns a bytes32 nonce that should change everytime. In practice, people should use - /// pseudorandom nonces. - /// @return Nonce that should be used as part of ClaimableInvite. - function consumeNonce() public returns (bytes32) { - return bytes32(keccak256(abi.encode(currentNonce++))); - } - - /// @notice Returns a ClaimableInvite with the issuer and current nonce. - /// @param _issuer Issuer to include in the ClaimableInvite. - /// @return ClaimableInvite that can be hashed & signed. 
- function getClaimableInviteWithNewNonce(address _issuer) public returns (OptimistInviter.ClaimableInvite memory) { - return OptimistInviter.ClaimableInvite(_issuer, consumeNonce()); - } - - /// @notice Computes the EIP712 digest with default correct parameters. - /// @param _claimableInvite ClaimableInvite struct to hash. - /// @return EIP-712 compatible digest. - function getDigest(OptimistInviter.ClaimableInvite calldata _claimableInvite) public view returns (bytes32) { - return getDigestWithEIP712Domain( - _claimableInvite, - bytes(name), - bytes(optimistInviter.EIP712_VERSION()), - block.chainid, - address(optimistInviter) - ); - } - - /// @notice Computes the EIP712 digest with the given domain parameters. - /// Used for testing that different domain parameters fail. - /// @param _claimableInvite ClaimableInvite struct to hash. - /// @param _name Contract name to use in the EIP712 domain. - /// @param _version Contract version to use in the EIP712 domain. - /// @param _chainid Chain ID to use in the EIP712 domain. - /// @param _verifyingContract Address to use in the EIP712 domain. - /// @return EIP-712 compatible digest. 
- function getDigestWithEIP712Domain( - OptimistInviter.ClaimableInvite calldata _claimableInvite, - bytes memory _name, - bytes memory _version, - uint256 _chainid, - address _verifyingContract - ) - public - pure - returns (bytes32) - { - bytes32 domainSeparator = keccak256( - abi.encode(EIP712_DOMAIN_TYPEHASH, keccak256(_name), keccak256(_version), _chainid, _verifyingContract) - ); - return ECDSA.toTypedDataHash(domainSeparator, getClaimableInviteStructHash(_claimableInvite)); - } -} diff --git a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol index 0282ea0b3d9..8d3feac2de0 100644 --- a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol @@ -61,7 +61,7 @@ contract DeployImplementationsInput_Test is Test { dii.disputeGameFinalityDelaySeconds(); vm.expectRevert("DeployImplementationsInput: not set"); - dii.release(); + dii.l1ContractsRelease(); vm.expectRevert("DeployImplementationsInput: not set"); dii.superchainConfigProxy(); @@ -69,23 +69,9 @@ contract DeployImplementationsInput_Test is Test { vm.expectRevert("DeployImplementationsInput: not set"); dii.protocolVersionsProxy(); - vm.expectRevert("DeployImplementationsInput: not set"); - dii.opcmProxyOwner(); - vm.expectRevert("DeployImplementationsInput: not set"); dii.standardVersionsToml(); } - - function test_opcmProxyOwner_whenNotSet_reverts() public { - vm.expectRevert("DeployImplementationsInput: not set"); - dii.opcmProxyOwner(); - } - - function test_opcmProxyOwner_succeeds() public { - dii.set(dii.opcmProxyOwner.selector, address(msg.sender)); - address opcmProxyOwner = dii.opcmProxyOwner(); - assertEq(address(msg.sender), address(opcmProxyOwner), "100"); - } } contract DeployImplementationsOutput_Test is Test { @@ -96,17 +82,7 @@ contract DeployImplementationsOutput_Test is Test { } function test_set_succeeds() public { - IProxy proxy = 
IProxy( - DeployUtils.create1({ - _name: "Proxy", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxy.__constructor__, (address(0)))) - }) - ); - address opcmImpl = address(makeAddr("opcmImpl")); - vm.prank(address(0)); - proxy.upgradeTo(opcmImpl); - - OPContractsManager opcmProxy = OPContractsManager(address(proxy)); + OPContractsManager opcm = OPContractsManager(address(makeAddr("opcm"))); IOptimismPortal2 optimismPortalImpl = IOptimismPortal2(payable(makeAddr("optimismPortalImpl"))); IDelayedWETH delayedWETHImpl = IDelayedWETH(payable(makeAddr("delayedWETHImpl"))); IPreimageOracle preimageOracleSingleton = IPreimageOracle(makeAddr("preimageOracleSingleton")); @@ -120,8 +96,7 @@ contract DeployImplementationsOutput_Test is Test { IOptimismMintableERC20Factory(makeAddr("optimismMintableERC20FactoryImpl")); IDisputeGameFactory disputeGameFactoryImpl = IDisputeGameFactory(makeAddr("disputeGameFactoryImpl")); - vm.etch(address(opcmProxy), address(opcmProxy).code); - vm.etch(address(opcmImpl), hex"01"); + vm.etch(address(opcm), hex"01"); vm.etch(address(optimismPortalImpl), hex"01"); vm.etch(address(delayedWETHImpl), hex"01"); vm.etch(address(preimageOracleSingleton), hex"01"); @@ -132,7 +107,7 @@ contract DeployImplementationsOutput_Test is Test { vm.etch(address(l1StandardBridgeImpl), hex"01"); vm.etch(address(optimismMintableERC20FactoryImpl), hex"01"); vm.etch(address(disputeGameFactoryImpl), hex"01"); - dio.set(dio.opcmProxy.selector, address(opcmProxy)); + dio.set(dio.opcm.selector, address(opcm)); dio.set(dio.optimismPortalImpl.selector, address(optimismPortalImpl)); dio.set(dio.delayedWETHImpl.selector, address(delayedWETHImpl)); dio.set(dio.preimageOracleSingleton.selector, address(preimageOracleSingleton)); @@ -144,7 +119,7 @@ contract DeployImplementationsOutput_Test is Test { dio.set(dio.optimismMintableERC20FactoryImpl.selector, address(optimismMintableERC20FactoryImpl)); dio.set(dio.disputeGameFactoryImpl.selector, 
address(disputeGameFactoryImpl)); - assertEq(address(opcmProxy), address(dio.opcmProxy()), "50"); + assertEq(address(opcm), address(dio.opcm()), "50"); assertEq(address(optimismPortalImpl), address(dio.optimismPortalImpl()), "100"); assertEq(address(delayedWETHImpl), address(dio.delayedWETHImpl()), "200"); assertEq(address(preimageOracleSingleton), address(dio.preimageOracleSingleton()), "300"); @@ -273,7 +248,7 @@ contract DeployImplementations_Test is Test { function test_deployImplementation_succeeds() public { string memory deployContractsRelease = "dev-release"; - dii.set(dii.release.selector, deployContractsRelease); + dii.set(dii.l1ContractsRelease.selector, deployContractsRelease); deployImplementations.deploySystemConfigImpl(dii, dio); assertTrue(address(0) != address(dio.systemConfigImpl())); } @@ -282,7 +257,7 @@ contract DeployImplementations_Test is Test { // All hardcoded addresses below are taken from the superchain-registry config: // https://github.com/ethereum-optimism/superchain-registry/blob/be65d22f8128cf0c4e5b4e1f677daf86843426bf/validation/standard/standard-versions.toml#L11 string memory testRelease = "op-contracts/v1.6.0"; - dii.set(dii.release.selector, testRelease); + dii.set(dii.l1ContractsRelease.selector, testRelease); deployImplementations.deploySystemConfigImpl(dii, dio); address srSystemConfigImpl = address(0xF56D96B2535B932656d3c04Ebf51baBff241D886); @@ -335,71 +310,6 @@ contract DeployImplementations_Test is Test { assertEq(srDisputeGameFactoryImpl, address(dio.disputeGameFactoryImpl())); } - function test_deploy_atNonExistentRelease_reverts() public { - string memory unknownRelease = "op-contracts/v0.0.0"; - dii.set(dii.release.selector, unknownRelease); - - bytes memory expectedErr = - bytes(string.concat("DeployImplementations: failed to deploy release ", unknownRelease)); - - vm.expectRevert(expectedErr); - deployImplementations.deploySystemConfigImpl(dii, dio); - - vm.expectRevert(expectedErr); - 
deployImplementations.deployL1CrossDomainMessengerImpl(dii, dio); - - vm.expectRevert(expectedErr); - deployImplementations.deployL1ERC721BridgeImpl(dii, dio); - - vm.expectRevert(expectedErr); - deployImplementations.deployL1StandardBridgeImpl(dii, dio); - - vm.expectRevert(expectedErr); - deployImplementations.deployOptimismMintableERC20FactoryImpl(dii, dio); - - // TODO: Uncomment the code below when OPContractsManager is deployed based on release. Superchain-registry - // doesn't contain OPContractsManager yet. - // dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); - // dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); - // vm.etch(address(superchainConfigProxy), hex"01"); - // vm.etch(address(protocolVersionsProxy), hex"01"); - // vm.expectRevert(expectedErr); - // deployImplementations.deployOPContractsManagerImpl(dii, dio); - - dii.set(dii.proofMaturityDelaySeconds.selector, 1); - dii.set(dii.disputeGameFinalityDelaySeconds.selector, 2); - vm.expectRevert(expectedErr); - deployImplementations.deployOptimismPortalImpl(dii, dio); - - dii.set(dii.withdrawalDelaySeconds.selector, 1); - vm.expectRevert(expectedErr); - deployImplementations.deployDelayedWETHImpl(dii, dio); - - dii.set(dii.minProposalSizeBytes.selector, 1); - dii.set(dii.challengePeriodSeconds.selector, 2); - vm.expectRevert(expectedErr); - deployImplementations.deployPreimageOracleSingleton(dii, dio); - - address preImageOracleSingleton = makeAddr("preImageOracleSingleton"); - vm.etch(address(preImageOracleSingleton), hex"01"); - dio.set(dio.preimageOracleSingleton.selector, preImageOracleSingleton); - vm.expectRevert(expectedErr); - deployImplementations.deployMipsSingleton(dii, dio); - - vm.expectRevert(expectedErr); // fault proof contracts don't exist at this release - deployImplementations.deployDisputeGameFactoryImpl(dii, dio); - } - - function test_deploy_noContractExistsAtRelease_reverts() public { - string memory unknownRelease = 
"op-contracts/v1.3.0"; - dii.set(dii.release.selector, unknownRelease); - bytes memory expectedErr = - bytes(string.concat("DeployImplementations: failed to deploy release ", unknownRelease)); - - vm.expectRevert(expectedErr); // fault proof contracts don't exist at this release - deployImplementations.deployDisputeGameFactoryImpl(dii, dio); - } - function testFuzz_run_memory_succeeds(bytes32 _seed) public { withdrawalDelaySeconds = uint256(hash(_seed, 0)); minProposalSizeBytes = uint256(hash(_seed, 1)); @@ -409,7 +319,7 @@ contract DeployImplementations_Test is Test { string memory release = string(bytes.concat(hash(_seed, 5))); protocolVersionsProxy = IProtocolVersions(address(uint160(uint256(hash(_seed, 7))))); - // Must configure the ProxyAdmin contract which is used to upgrade the OPCM's proxy contract. + // Must configure the ProxyAdmin contract. IProxyAdmin superchainProxyAdmin = IProxyAdmin( DeployUtils.create1({ _name: "ProxyAdmin", @@ -439,10 +349,9 @@ contract DeployImplementations_Test is Test { dii.set(dii.proofMaturityDelaySeconds.selector, proofMaturityDelaySeconds); dii.set(dii.disputeGameFinalityDelaySeconds.selector, disputeGameFinalityDelaySeconds); dii.set(dii.mipsVersion.selector, 1); - dii.set(dii.release.selector, release); + dii.set(dii.l1ContractsRelease.selector, release); dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); - dii.set(dii.opcmProxyOwner.selector, msg.sender); deployImplementations.run(dii, dio); @@ -453,10 +362,9 @@ contract DeployImplementations_Test is Test { assertEq(proofMaturityDelaySeconds, dii.proofMaturityDelaySeconds(), "400"); assertEq(disputeGameFinalityDelaySeconds, dii.disputeGameFinalityDelaySeconds(), "500"); assertEq(1, dii.mipsVersion(), "512"); - assertEq(release, dii.release(), "525"); + assertEq(release, dii.l1ContractsRelease(), "525"); assertEq(address(superchainConfigProxy), 
address(dii.superchainConfigProxy()), "550"); assertEq(address(protocolVersionsProxy), address(dii.protocolVersionsProxy()), "575"); - assertEq(msg.sender, dii.opcmProxyOwner(), "580"); // Architecture assertions. assertEq(address(dio.mipsSingleton().oracle()), address(dio.preimageOracleSingleton()), "600"); @@ -475,7 +383,7 @@ contract DeployImplementations_Test is Test { dii.set(dii.disputeGameFinalityDelaySeconds.selector, disputeGameFinalityDelaySeconds); dii.set(dii.mipsVersion.selector, 1); string memory release = "dev-release"; - dii.set(dii.release.selector, release); + dii.set(dii.l1ContractsRelease.selector, release); dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index 5249ded41cc..5280328168b 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -39,10 +39,10 @@ contract DeployOPChainInput_Test is Test { address unsafeBlockSigner = makeAddr("unsafeBlockSigner"); address proposer = makeAddr("proposer"); address challenger = makeAddr("challenger"); + address opcm = makeAddr("opcm"); uint32 basefeeScalar = 100; uint32 blobBaseFeeScalar = 200; uint256 l2ChainId = 300; - OPContractsManager opcm = OPContractsManager(makeAddr("opcm")); string saltMixer = "saltMixer"; function setUp() public { @@ -60,9 +60,8 @@ contract DeployOPChainInput_Test is Test { doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); doi.set(doi.allowCustomDisputeParameters.selector, true); - - (IProxy opcmProxy) = DeployUtils.buildERC1967ProxyWithImpl("opcmProxy"); - doi.set(doi.opcmProxy.selector, address(opcmProxy)); + doi.set(doi.opcm.selector, opcm); + vm.etch(opcm, hex"01"); // Compare the default inputs to the getter 
methods. assertEq(opChainProxyAdminOwner, doi.opChainProxyAdminOwner(), "200"); @@ -74,7 +73,7 @@ contract DeployOPChainInput_Test is Test { assertEq(basefeeScalar, doi.basefeeScalar(), "800"); assertEq(blobBaseFeeScalar, doi.blobBaseFeeScalar(), "900"); assertEq(l2ChainId, doi.l2ChainId(), "1000"); - assertEq(address(opcmProxy), address(doi.opcmProxy()), "1100"); + assertEq(opcm, address(doi.opcm()), "1100"); assertEq(true, doi.allowCustomDisputeParameters(), "1200"); } @@ -396,7 +395,7 @@ contract DeployOPChain_TestBase is Test { dii.set(dii.proofMaturityDelaySeconds.selector, proofMaturityDelaySeconds); dii.set(dii.disputeGameFinalityDelaySeconds.selector, disputeGameFinalityDelaySeconds); dii.set(dii.mipsVersion.selector, 1); - dii.set(dii.release.selector, release); + dii.set(dii.l1ContractsRelease.selector, release); dii.set(dii.superchainConfigProxy.selector, address(superchainConfigProxy)); dii.set(dii.protocolVersionsProxy.selector, address(protocolVersionsProxy)); // End users of the DeployImplementations contract will need to set the `standardVersionsToml`. @@ -404,7 +403,7 @@ contract DeployOPChain_TestBase is Test { string.concat(vm.projectRoot(), "/test/fixtures/standard-versions.toml"); string memory standardVersionsToml = vm.readFile(standardVersionsTomlPath); dii.set(dii.standardVersionsToml.selector, standardVersionsToml); - dii.set(dii.opcmProxyOwner.selector, address(1)); + deployImplementations.run(dii, dio); // Deploy DeployOpChain, but defer populating the input values to the test suites inheriting this contract. @@ -412,7 +411,7 @@ contract DeployOPChain_TestBase is Test { (doi, doo) = deployOPChain.etchIOContracts(); // Set the OPContractsManager input for DeployOPChain. 
- opcm = dio.opcmProxy(); + opcm = dio.opcm(); } // See the function of the same name in the `DeployImplementations_Test` contract of @@ -466,7 +465,7 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { doi.set(doi.basefeeScalar.selector, basefeeScalar); doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opcmProxy.selector, address(opcm)); // Not fuzzed since it must be an actual instance. + doi.set(doi.opcm.selector, address(opcm)); doi.set(doi.saltMixer.selector, saltMixer); doi.set(doi.gasLimit.selector, gasLimit); doi.set(doi.disputeGameType.selector, disputeGameType); @@ -559,7 +558,7 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { doi.set(doi.basefeeScalar.selector, basefeeScalar); doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opcmProxy.selector, address(opcm)); + doi.set(doi.opcm.selector, address(opcm)); doi.set(doi.saltMixer.selector, saltMixer); doi.set(doi.gasLimit.selector, gasLimit); doi.set(doi.disputeGameType.selector, disputeGameType); diff --git a/packages/contracts-bedrock/test/periphery/op-nft/AttestationStation.t.sol b/packages/contracts-bedrock/test/periphery/op-nft/AttestationStation.t.sol deleted file mode 100644 index 4c9b72254d3..00000000000 --- a/packages/contracts-bedrock/test/periphery/op-nft/AttestationStation.t.sol +++ /dev/null @@ -1,115 +0,0 @@ -//SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -/* Testing utilities */ -import { Test } from "forge-std/Test.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; - -contract AttestationStation_Initializer is Test { - address alice_attestor = address(128); - address bob = address(256); - address sally = address(512); - - function setUp() public { - // Give alice and bob some ETH - vm.deal(alice_attestor, 1 ether); - - vm.label(alice_attestor, "alice_attestor"); - vm.label(bob, "bob"); - 
vm.label(sally, "sally"); - } -} - -contract AttestationStationTest is AttestationStation_Initializer { - event AttestationCreated(address indexed creator, address indexed about, bytes32 indexed key, bytes val); - - function test_attest_individual_succeeds() external { - AttestationStation attestationStation = new AttestationStation(); - - vm.expectEmit(true, true, true, true); - emit AttestationCreated(alice_attestor, bob, bytes32("foo"), bytes("bar")); - - vm.prank(alice_attestor); - attestationStation.attest({ _about: bob, _key: bytes32("foo"), _val: bytes("bar") }); - } - - function test_attest_single_succeeds() external { - AttestationStation attestationStation = new AttestationStation(); - - AttestationStation.AttestationData[] memory attestationDataArr = new AttestationStation.AttestationData[](1); - - // alice is going to attest about bob - AttestationStation.AttestationData memory attestationData = AttestationStation.AttestationData({ - about: bob, - key: bytes32("test-key:string"), - val: bytes("test-value") - }); - - // assert the attestation starts empty - assertEq(attestationStation.attestations(alice_attestor, attestationData.about, attestationData.key), ""); - - // make attestation - vm.prank(alice_attestor); - attestationDataArr[0] = attestationData; - attestationStation.attest(attestationDataArr); - - // assert the attestation is there - assertEq( - attestationStation.attestations(alice_attestor, attestationData.about, attestationData.key), - attestationData.val - ); - - bytes memory new_val = bytes("new updated value"); - // make a new attestations to same about and key - attestationData = - AttestationStation.AttestationData({ about: attestationData.about, key: attestationData.key, val: new_val }); - - vm.prank(alice_attestor); - attestationDataArr[0] = attestationData; - attestationStation.attest(attestationDataArr); - - // assert the attestation is updated - assertEq( - attestationStation.attestations(alice_attestor, attestationData.about, 
attestationData.key), - attestationData.val - ); - } - - function test_attest_bulk_succeeds() external { - AttestationStation attestationStation = new AttestationStation(); - - vm.prank(alice_attestor); - - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](3); - attestationData[0] = AttestationStation.AttestationData({ - about: bob, - key: bytes32("test-key:string"), - val: bytes("test-value") - }); - - attestationData[1] = - AttestationStation.AttestationData({ about: bob, key: bytes32("test-key2"), val: bytes("test-value2") }); - - attestationData[2] = AttestationStation.AttestationData({ - about: sally, - key: bytes32("test-key:string"), - val: bytes("test-value3") - }); - - attestationStation.attest(attestationData); - - // assert the attestations are there - assertEq( - attestationStation.attestations(alice_attestor, attestationData[0].about, attestationData[0].key), - attestationData[0].val - ); - assertEq( - attestationStation.attestations(alice_attestor, attestationData[1].about, attestationData[1].key), - attestationData[1].val - ); - assertEq( - attestationStation.attestations(alice_attestor, attestationData[2].about, attestationData[2].key), - attestationData[2].val - ); - } -} diff --git a/packages/contracts-bedrock/test/periphery/op-nft/Optimist.t.sol b/packages/contracts-bedrock/test/periphery/op-nft/Optimist.t.sol deleted file mode 100644 index 2eb2f07e860..00000000000 --- a/packages/contracts-bedrock/test/periphery/op-nft/Optimist.t.sol +++ /dev/null @@ -1,543 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity >=0.6.2 <0.9.0; - -// Testing utilities -import { Test } from "forge-std/Test.sol"; -import { IMulticall3 } from "forge-std/interfaces/IMulticall3.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { Optimist } from "src/periphery/op-nft/Optimist.sol"; -import { OptimistAllowlist } from "src/periphery/op-nft/OptimistAllowlist.sol"; -import 
{ OptimistInviter } from "src/periphery/op-nft/OptimistInviter.sol"; -import { OptimistInviterHelper } from "test/mocks/OptimistInviterHelper.sol"; -import { IERC721 } from "@openzeppelin/contracts/token/ERC721/IERC721.sol"; - -library Multicall { - bytes internal constant code = - hex"6080604052600436106100f35760003560e01c80634d2301cc1161008a578063a8b0574e11610059578063a8b0574e1461025a578063bce38bd714610275578063c3077fa914610288578063ee82ac5e1461029b57600080fd5b80634d2301cc146101ec57806372425d9d1461022157806382ad56cb1461023457806386d516e81461024757600080fd5b80633408e470116100c65780633408e47014610191578063399542e9146101a45780633e64a696146101c657806342cbb15c146101d957600080fd5b80630f28c97d146100f8578063174dea711461011a578063252dba421461013a57806327e86d6e1461015b575b600080fd5b34801561010457600080fd5b50425b6040519081526020015b60405180910390f35b61012d610128366004610a85565b6102ba565b6040516101119190610bbe565b61014d610148366004610a85565b6104ef565b604051610111929190610bd8565b34801561016757600080fd5b50437fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0140610107565b34801561019d57600080fd5b5046610107565b6101b76101b2366004610c60565b610690565b60405161011193929190610cba565b3480156101d257600080fd5b5048610107565b3480156101e557600080fd5b5043610107565b3480156101f857600080fd5b50610107610207366004610ce2565b73ffffffffffffffffffffffffffffffffffffffff163190565b34801561022d57600080fd5b5044610107565b61012d610242366004610a85565b6106ab565b34801561025357600080fd5b5045610107565b34801561026657600080fd5b50604051418152602001610111565b61012d610283366004610c60565b61085a565b6101b7610296366004610a85565b610a1a565b3480156102a757600080fd5b506101076102b6366004610d18565b4090565b60606000828067ffffffffffffffff8111156102d8576102d8610d31565b60405190808252806020026020018201604052801561031e57816020015b6040805180820190915260008152606060208201528152602001906001900390816102f65790505b5092503660005b8281101561047757600085828151811061034157610341610d60565b6020026020010151905087878381811061035d5761035
d610d60565b905060200281019061036f9190610d8f565b6040810135958601959093506103886020850185610ce2565b73ffffffffffffffffffffffffffffffffffffffff16816103ac6060870187610dcd565b6040516103ba929190610e32565b60006040518083038185875af1925050503d80600081146103f7576040519150601f19603f3d011682016040523d82523d6000602084013e6103fc565b606091505b50602080850191909152901515808452908501351761046d577f08c379a000000000000000000000000000000000000000000000000000000000600052602060045260176024527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060445260846000fd5b5050600101610325565b508234146104e6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f4d756c746963616c6c333a2076616c7565206d69736d6174636800000000000060448201526064015b60405180910390fd5b50505092915050565b436060828067ffffffffffffffff81111561050c5761050c610d31565b60405190808252806020026020018201604052801561053f57816020015b606081526020019060019003908161052a5790505b5091503660005b8281101561068657600087878381811061056257610562610d60565b90506020028101906105749190610e42565b92506105836020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff166105a66020850185610dcd565b6040516105b4929190610e32565b6000604051808303816000865af19150503d80600081146105f1576040519150601f19603f3d011682016040523d82523d6000602084013e6105f6565b606091505b5086848151811061060957610609610d60565b602090810291909101015290508061067d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060448201526064016104dd565b50600101610546565b5050509250929050565b43804060606106a086868661085a565b905093509350939050565b6060818067ffffffffffffffff8111156106c7576106c7610d31565b60405190808252806020026020018201604052801561070d57816020015b6040805180820190915260008152606060208201528152602001906001900390816106e55790505b5091503660005b828110156104e657600084828151811061073057610730610d60565b6020026020010151905086868381811
061074c5761074c610d60565b905060200281019061075e9190610e76565b925061076d6020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff166107906040850185610dcd565b60405161079e929190610e32565b6000604051808303816000865af19150503d80600081146107db576040519150601f19603f3d011682016040523d82523d6000602084013e6107e0565b606091505b506020808401919091529015158083529084013517610851577f08c379a000000000000000000000000000000000000000000000000000000000600052602060045260176024527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060445260646000fd5b50600101610714565b6060818067ffffffffffffffff81111561087657610876610d31565b6040519080825280602002602001820160405280156108bc57816020015b6040805180820190915260008152606060208201528152602001906001900390816108945790505b5091503660005b82811015610a105760008482815181106108df576108df610d60565b602002602001015190508686838181106108fb576108fb610d60565b905060200281019061090d9190610e42565b925061091c6020840184610ce2565b73ffffffffffffffffffffffffffffffffffffffff1661093f6020850185610dcd565b60405161094d929190610e32565b6000604051808303816000865af19150503d806000811461098a576040519150601f19603f3d011682016040523d82523d6000602084013e61098f565b606091505b506020830152151581528715610a07578051610a07576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4d756c746963616c6c333a2063616c6c206661696c656400000000000000000060448201526064016104dd565b506001016108c3565b5050509392505050565b6000806060610a2b60018686610690565b919790965090945092505050565b60008083601f840112610a4b57600080fd5b50813567ffffffffffffffff811115610a6357600080fd5b6020830191508360208260051b8501011115610a7e57600080fd5b9250929050565b60008060208385031215610a9857600080fd5b823567ffffffffffffffff811115610aaf57600080fd5b610abb85828601610a39565b90969095509350505050565b6000815180845260005b81811015610aed57602081850181015186830182015201610ad1565b81811115610aff576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01692909
20160200192915050565b600082825180855260208086019550808260051b84010181860160005b84811015610bb1578583037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe001895281518051151584528401516040858501819052610b9d81860183610ac7565b9a86019a9450505090830190600101610b4f565b5090979650505050505050565b602081526000610bd16020830184610b32565b9392505050565b600060408201848352602060408185015281855180845260608601915060608160051b870101935082870160005b82811015610c52577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa0888703018452610c40868351610ac7565b95509284019290840190600101610c06565b509398975050505050505050565b600080600060408486031215610c7557600080fd5b83358015158114610c8557600080fd5b9250602084013567ffffffffffffffff811115610ca157600080fd5b610cad86828701610a39565b9497909650939450505050565b838152826020820152606060408201526000610cd96060830184610b32565b95945050505050565b600060208284031215610cf457600080fd5b813573ffffffffffffffffffffffffffffffffffffffff81168114610bd157600080fd5b600060208284031215610d2a57600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff81833603018112610dc357600080fd5b9190910192915050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112610e0257600080fd5b83018035915067ffffffffffffffff821115610e1d57600080fd5b602001915036819003821315610a7e57600080fd5b8183823760009101908152919050565b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc1833603018112610dc357600080fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa1833603018112610dc357600080fdfea2646970667358221220bb2b5c71a328032f97c676ae39a1ec2148d3e5d6f73d95e9b17910152d61f16264736f6c634300080c0033"; - address internal constant addr = 0xcA11bde05977b3631167028862bE2a173976CA11; -} - -contract 
Optimist_Initializer is Test { - event Transfer(address indexed from, address indexed to, uint256 indexed tokenId); - event Initialized(uint8); - event AttestationCreated(address indexed creator, address indexed about, bytes32 indexed key, bytes val); - - string constant name = "Optimist name"; - string constant symbol = "OPTIMISTSYMBOL"; - string constant base_uri = "https://storageapi.fleek.co/6442819a1b05-bucket/optimist-nft/attributes"; - AttestationStation attestationStation; - Optimist optimist; - OptimistAllowlist optimistAllowlist; - OptimistInviter optimistInviter; - - // Helps with EIP-712 signature generation - OptimistInviterHelper optimistInviterHelper; - - // To test multicall for claiming and minting in one call - IMulticall3 multicall3; - - address internal carol_baseURIAttestor; - address internal alice_allowlistAttestor; - address internal eve_inviteGranter; - address internal ted_coinbaseAttestor; - address internal bob; - address internal sally; - - /// @notice BaseURI attestor sets the baseURI of the Optimist NFT. - function _attestBaseURI(string memory _baseUri) internal { - bytes32 baseURIAttestationKey = optimist.BASE_URI_ATTESTATION_KEY(); - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](1); - attestationData[0] = - AttestationStation.AttestationData(address(optimist), baseURIAttestationKey, bytes(_baseUri)); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated(carol_baseURIAttestor, address(optimist), baseURIAttestationKey, bytes(_baseUri)); - vm.prank(carol_baseURIAttestor); - attestationStation.attest(attestationData); - } - - /// @notice Allowlist attestor creates an attestation for an address. 
- function _attestAllowlist(address _about) internal { - bytes32 attestationKey = optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(); - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](1); - // we are using true but it can be any non empty value - attestationData[0] = - AttestationStation.AttestationData({ about: _about, key: attestationKey, val: bytes("true") }); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated(alice_allowlistAttestor, _about, attestationKey, bytes("true")); - - vm.prank(alice_allowlistAttestor); - attestationStation.attest(attestationData); - - assertTrue(optimist.isOnAllowList(_about)); - } - - /// @notice Coinbase Quest attestor creates an attestation for an address. - function _attestCoinbaseQuest(address _about) internal { - bytes32 attestationKey = optimistAllowlist.COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY(); - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](1); - // we are using true but it can be any non empty value - attestationData[0] = - AttestationStation.AttestationData({ about: _about, key: attestationKey, val: bytes("true") }); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated(ted_coinbaseAttestor, _about, attestationKey, bytes("true")); - - vm.prank(ted_coinbaseAttestor); - attestationStation.attest(attestationData); - - assertTrue(optimist.isOnAllowList(_about)); - } - - /// @notice Issues invite, then claims it using the claimer's address. 
- function _inviteAndClaim(address _about) internal { - uint256 inviterPrivateKey = 0xbeefbeef; - address inviter = vm.addr(inviterPrivateKey); - - address[] memory addresses = new address[](1); - addresses[0] = inviter; - - vm.prank(eve_inviteGranter); - - // grant invites to Inviter; - optimistInviter.setInviteCounts(addresses, 3); - - // issue a new invite - OptimistInviter.ClaimableInvite memory claimableInvite = - optimistInviterHelper.getClaimableInviteWithNewNonce(inviter); - - // EIP-712 sign with Inviter's private key - - (uint8 v, bytes32 r, bytes32 s) = vm.sign(inviterPrivateKey, optimistInviterHelper.getDigest(claimableInvite)); - bytes memory signature = abi.encodePacked(r, s, v); - - bytes32 hashedCommit = keccak256(abi.encode(_about, signature)); - - // commit the invite - vm.prank(_about); - optimistInviter.commitInvite(hashedCommit); - - // wait minimum commitment period - vm.warp(optimistInviter.MIN_COMMITMENT_PERIOD() + block.timestamp); - - // reveal and claim the invite - optimistInviter.claimInvite(_about, claimableInvite, signature); - - assertTrue(optimist.isOnAllowList(_about)); - } - - /// @notice Mocks the allowlistAttestor to always return true for a given address. - function _mockAllowlistTrueFor(address _claimer) internal { - vm.mockCall( - address(optimistAllowlist), abi.encodeCall(OptimistAllowlist.isAllowedToMint, (_claimer)), abi.encode(true) - ); - - assertTrue(optimist.isOnAllowList(_claimer)); - } - - /// @notice Returns address as uint256. 
- function _getTokenId(address _owner) internal pure returns (uint256) { - return uint256(uint160(address(_owner))); - } - - function setUp() public { - carol_baseURIAttestor = makeAddr("carol_baseURIAttestor"); - alice_allowlistAttestor = makeAddr("alice_allowlistAttestor"); - eve_inviteGranter = makeAddr("eve_inviteGranter"); - ted_coinbaseAttestor = makeAddr("ted_coinbaseAttestor"); - bob = makeAddr("bob"); - sally = makeAddr("sally"); - _initializeContracts(); - } - - function _initializeContracts() internal { - attestationStation = new AttestationStation(); - vm.expectEmit(true, true, false, false); - emit Initialized(1); - - optimistInviter = - new OptimistInviter({ _inviteGranter: eve_inviteGranter, _attestationStation: attestationStation }); - - optimistInviter.initialize("OptimistInviter"); - - // Initialize the helper which helps sign EIP-712 signatures - optimistInviterHelper = new OptimistInviterHelper(optimistInviter, "OptimistInviter"); - - optimistAllowlist = new OptimistAllowlist({ - _attestationStation: attestationStation, - _allowlistAttestor: alice_allowlistAttestor, - _coinbaseQuestAttestor: ted_coinbaseAttestor, - _optimistInviter: address(optimistInviter) - }); - - optimist = new Optimist({ - _name: name, - _symbol: symbol, - _baseURIAttestor: carol_baseURIAttestor, - _attestationStation: attestationStation, - _optimistAllowlist: optimistAllowlist - }); - - multicall3 = IMulticall3(Multicall.addr); - vm.etch(Multicall.addr, Multicall.code); - } -} - -contract OptimistTest is Optimist_Initializer { - /// @notice Check that constructor and initializer parameters are correctly set. 
- function test_initialize_succeeds() external view { - // expect name to be set - assertEq(optimist.name(), name); - // expect symbol to be set - assertEq(optimist.symbol(), symbol); - // expect attestationStation to be set - assertEq(address(optimist.ATTESTATION_STATION()), address(attestationStation)); - assertEq(optimist.BASE_URI_ATTESTOR(), carol_baseURIAttestor); - } - - /// @notice Bob should be able to mint an NFT if he is allowlisted - /// by the allowlistAttestor and has a balance of 0. - function test_mint_afterAllowlistAttestation_succeeds() external { - // bob should start with 0 balance - assertEq(optimist.balanceOf(bob), 0); - - // allowlist bob - _attestAllowlist(bob); - - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - - // Check that the OptimistAllowlist is checked - bytes memory data = abi.encodeCall(OptimistAllowlist.isAllowedToMint, (bob)); - vm.expectCall(address(optimistAllowlist), data); - - // mint an NFT and expect mint transfer event to be emitted - vm.expectEmit(true, true, true, true); - emit Transfer(address(0), bob, _getTokenId(bob)); - vm.prank(bob); - optimist.mint(bob); - - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Bob should be able to mint an NFT if he claimed an invite through OptimistInviter - /// and has a balance of 0. 
- function test_mint_afterInviteClaimed_succeeds() external { - // bob should start with 0 balance - assertEq(optimist.balanceOf(bob), 0); - - // bob claims an invite - _inviteAndClaim(bob); - - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - - // Check that the OptimistAllowlist is checked - bytes memory data = abi.encodeCall(OptimistAllowlist.isAllowedToMint, (bob)); - vm.expectCall(address(optimistAllowlist), data); - - // mint an NFT and expect mint transfer event to be emitted - vm.expectEmit(true, true, true, true); - emit Transfer(address(0), bob, _getTokenId(bob)); - vm.prank(bob); - optimist.mint(bob); - - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Bob should be able to mint an NFT if he has an attestation from Coinbase Quest - /// attestor and has a balance of 0. - function test_mint_afterCoinbaseQuestAttestation_succeeds() external { - // bob should start with 0 balance - assertEq(optimist.balanceOf(bob), 0); - - // bob receives attestation from Coinbase Quest attestor - _attestCoinbaseQuest(bob); - - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - - // Check that the OptimistAllowlist is checked - bytes memory data = abi.encodeCall(OptimistAllowlist.isAllowedToMint, (bob)); - vm.expectCall(address(optimistAllowlist), data); - - // mint an NFT and expect mint transfer event to be emitted - vm.expectEmit(true, true, true, true); - emit Transfer(address(0), bob, _getTokenId(bob)); - vm.prank(bob); - optimist.mint(bob); - - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Multiple valid attestations should allow Bob to mint. 
- function test_mint_afterMultipleAttestations_succeeds() external { - // bob should start with 0 balance - assertEq(optimist.balanceOf(bob), 0); - - // bob receives attestation from Coinbase Quest attestor - _attestCoinbaseQuest(bob); - - // allowlist bob - _attestAllowlist(bob); - - // bob claims an invite - _inviteAndClaim(bob); - - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - - // Check that the OptimistAllowlist is checked - bytes memory data = abi.encodeCall(OptimistAllowlist.isAllowedToMint, (bob)); - vm.expectCall(address(optimistAllowlist), data); - - // mint an NFT and expect mint transfer event to be emitted - vm.expectEmit(true, true, true, true); - emit Transfer(address(0), bob, _getTokenId(bob)); - vm.prank(bob); - optimist.mint(bob); - - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Sally should be able to mint a token on behalf of bob. - function test_mint_secondaryMinter_succeeds() external { - _mockAllowlistTrueFor(bob); - - vm.expectEmit(true, true, true, true); - emit Transfer(address(0), bob, _getTokenId(bob)); - - // mint as sally instead of bob - vm.prank(sally); - optimist.mint(bob); - - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Bob should not be able to mint an NFT if he is not allowlisted. - function test_mint_forNonAllowlistedClaimer_reverts() external { - vm.prank(bob); - vm.expectRevert("Optimist: address is not on allowList"); - optimist.mint(bob); - } - - /// @notice Bob's tx should revert if he already minted. 
- function test_mint_forAlreadyMintedClaimer_reverts() external { - _attestAllowlist(bob); - - // mint initial nft with bob - vm.prank(bob); - optimist.mint(bob); - // expect the NFT to be owned by bob - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - - // attempt to mint again - vm.expectRevert("ERC721: token already minted"); - optimist.mint(bob); - } - - /// @notice The baseURI should be set by attestation station by the baseURIAttestor. - function test_baseURI_returnsCorrectBaseURI_succeeds() external { - _attestBaseURI(base_uri); - - bytes memory data = abi.encodeCall( - attestationStation.attestations, - (carol_baseURIAttestor, address(optimist), optimist.BASE_URI_ATTESTATION_KEY()) - ); - vm.expectCall(address(attestationStation), data); - vm.prank(carol_baseURIAttestor); - - // assert baseURI is set - assertEq(optimist.baseURI(), base_uri); - } - - /// @notice tokenURI should return the token uri for a minted token. - function test_tokenURI_returnsCorrectTokenURI_succeeds() external { - // we are using true but it can be any non empty value - _attestBaseURI(base_uri); - - // mint an NFT - _mockAllowlistTrueFor(bob); - vm.prank(bob); - optimist.mint(bob); - - // assert tokenURI is set - assertEq(optimist.baseURI(), base_uri); - assertEq( - optimist.tokenURI(_getTokenId(bob)), - "https://storageapi.fleek.co/6442819a1b05-bucket/optimist-nft/attributes/0x1d96f2f6bef1202e4ce1ff6dad0c2cb002861d3e.json" - ); - } - - /// @notice Should return the token id of the owner. - function test_tokenIdOfAddress_returnsOwnerID_succeeds() external { - uint256 willTokenId = 1024; - address will = address(1024); - - _mockAllowlistTrueFor(will); - - optimist.mint(will); - - assertEq(optimist.tokenIdOfAddress(will), willTokenId); - } - - /// @notice transferFrom should revert since Optimist is a SBT. 
- function test_transferFrom_soulbound_reverts() external { - _mockAllowlistTrueFor(bob); - - // mint as bob - vm.prank(bob); - optimist.mint(bob); - - // attempt to transfer to sally - vm.expectRevert(bytes("Optimist: soul bound token")); - vm.prank(bob); - optimist.transferFrom(bob, sally, _getTokenId(bob)); - - // attempt to transfer to sally - vm.expectRevert(bytes("Optimist: soul bound token")); - vm.prank(bob); - optimist.safeTransferFrom(bob, sally, _getTokenId(bob)); - // attempt to transfer to sally - vm.expectRevert(bytes("Optimist: soul bound token")); - vm.prank(bob); - optimist.safeTransferFrom(bob, sally, _getTokenId(bob), bytes("0x")); - } - - /// @notice approve should revert since Optimist is a SBT. - function test_approve_soulbound_reverts() external { - _mockAllowlistTrueFor(bob); - - // mint as bob - vm.prank(bob); - optimist.mint(bob); - - // attempt to approve sally - vm.prank(bob); - vm.expectRevert("Optimist: soul bound token"); - optimist.approve(address(attestationStation), _getTokenId(bob)); - - assertEq(optimist.getApproved(_getTokenId(bob)), address(0)); - } - - /// @notice setApprovalForAll should revert since Optimist is a SBT. - function test_setApprovalForAll_soulbound_reverts() external { - _mockAllowlistTrueFor(bob); - - // mint as bob - vm.prank(bob); - optimist.mint(bob); - vm.prank(alice_allowlistAttestor); - vm.expectRevert(bytes("Optimist: soul bound token")); - optimist.setApprovalForAll(alice_allowlistAttestor, true); - - // expect approval amount to stil be 0 - assertEq(optimist.getApproved(_getTokenId(bob)), address(0)); - // isApprovedForAll should return false - assertEq(optimist.isApprovedForAll(alice_allowlistAttestor, alice_allowlistAttestor), false); - } - - /// @notice Only owner should be able to burn token. 
- function test_burn_byOwner_succeeds() external { - _mockAllowlistTrueFor(bob); - - // mint as bob - vm.prank(bob); - optimist.mint(bob); - - // burn as bob - vm.prank(bob); - optimist.burn(_getTokenId(bob)); - - // expect bob to have no balance now - assertEq(optimist.balanceOf(bob), 0); - } - - /// @notice Non-owner attempting to burn token should revert. - function test_burn_byNonOwner_reverts() external { - _mockAllowlistTrueFor(bob); - - // mint as bob - vm.prank(bob); - optimist.mint(bob); - - vm.expectRevert("ERC721: caller is not token owner nor approved"); - // burn as Sally - vm.prank(sally); - optimist.burn(_getTokenId(bob)); - - // expect bob to have still have the token - assertEq(optimist.balanceOf(bob), 1); - } - - /// @notice Should support ERC-721 interface. - function test_supportsInterface_returnsCorrectInterfaceForERC721_succeeds() external view { - bytes4 iface721 = type(IERC721).interfaceId; - // check that it supports ERC-721 interface - assertEq(optimist.supportsInterface(iface721), true); - } - - /// @notice Checking that multi-call using the invite & claim flow works correctly, since the - /// frontend will be making multicalls to improve UX. 
The OptimistInviter.claimInvite - /// and Optimist.mint will be batched - function test_multicall_batchingClaimAndMint_succeeds() external { - uint256 inviterPrivateKey = 0xbeefbeef; - address inviter = vm.addr(inviterPrivateKey); - - address[] memory addresses = new address[](1); - addresses[0] = inviter; - - vm.prank(eve_inviteGranter); - - // grant invites to Inviter; - optimistInviter.setInviteCounts(addresses, 3); - - // issue a new invite - OptimistInviter.ClaimableInvite memory claimableInvite = - optimistInviterHelper.getClaimableInviteWithNewNonce(inviter); - - // EIP-712 sign with Inviter's private key - - (uint8 v, bytes32 r, bytes32 s) = vm.sign(inviterPrivateKey, optimistInviterHelper.getDigest(claimableInvite)); - bytes memory signature = abi.encodePacked(r, s, v); - - bytes32 hashedCommit = keccak256(abi.encode(bob, signature)); - - // commit the invite - vm.prank(bob); - optimistInviter.commitInvite(hashedCommit); - - // wait minimum commitment period - vm.warp(optimistInviter.MIN_COMMITMENT_PERIOD() + block.timestamp); - - IMulticall3.Call3[] memory calls = new IMulticall3.Call3[](2); - - // First call is to claim the invite, receiving the attestation - calls[0] = IMulticall3.Call3({ - target: address(optimistInviter), - callData: abi.encodeCall(OptimistInviter.claimInvite, (bob, claimableInvite, signature)), - allowFailure: false - }); - - // Second call is to mint the Optimist NFT - calls[1] = IMulticall3.Call3({ - target: address(optimist), - callData: abi.encodeCall(Optimist.mint, (bob)), - allowFailure: false - }); - - multicall3.aggregate3(calls); - - assertTrue(optimist.isOnAllowList(bob)); - assertEq(optimist.ownerOf(_getTokenId(bob)), bob); - assertEq(optimist.balanceOf(bob), 1); - } -} diff --git a/packages/contracts-bedrock/test/periphery/op-nft/OptimistAllowlist.t.sol b/packages/contracts-bedrock/test/periphery/op-nft/OptimistAllowlist.t.sol deleted file mode 100644 index c0c2aef2bce..00000000000 --- 
a/packages/contracts-bedrock/test/periphery/op-nft/OptimistAllowlist.t.sol +++ /dev/null @@ -1,227 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -// Testing utilities -import { Test } from "forge-std/Test.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { OptimistAllowlist } from "src/periphery/op-nft/OptimistAllowlist.sol"; -import { OptimistInviter } from "src/periphery/op-nft/OptimistInviter.sol"; -import { OptimistInviterHelper } from "test/mocks/OptimistInviterHelper.sol"; -import { OptimistConstants } from "src/periphery/op-nft/libraries/OptimistConstants.sol"; - -contract OptimistAllowlist_Initializer is Test { - event AttestationCreated(address indexed creator, address indexed about, bytes32 indexed key, bytes val); - - address internal alice_allowlistAttestor; - address internal sally_coinbaseQuestAttestor; - address internal ted; - - uint256 internal bobPrivateKey; - address internal bob; - - AttestationStation attestationStation; - OptimistAllowlist optimistAllowlist; - OptimistInviter optimistInviter; - - // Helps with EIP-712 signature generation - OptimistInviterHelper optimistInviterHelper; - - function setUp() public { - alice_allowlistAttestor = makeAddr("alice_allowlistAttestor"); - sally_coinbaseQuestAttestor = makeAddr("sally_coinbaseQuestAttestor"); - ted = makeAddr("ted"); - - bobPrivateKey = 0xB0B0B0B0; - bob = vm.addr(bobPrivateKey); - vm.label(bob, "bob"); - - // Give alice and bob and sally some ETH - vm.deal(alice_allowlistAttestor, 1 ether); - vm.deal(sally_coinbaseQuestAttestor, 1 ether); - vm.deal(bob, 1 ether); - vm.deal(ted, 1 ether); - - _initializeContracts(); - } - - function attestAllowlist(address _about) internal { - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](1); - // we are using true but it can be any non empty value - attestationData[0] = AttestationStation.AttestationData({ - about: _about, - 
key: optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(), - val: bytes("true") - }); - vm.prank(alice_allowlistAttestor); - attestationStation.attest(attestationData); - } - - function attestCoinbaseQuest(address _about) internal { - AttestationStation.AttestationData[] memory attestationData = new AttestationStation.AttestationData[](1); - // we are using true but it can be any non empty value - attestationData[0] = AttestationStation.AttestationData({ - about: _about, - key: optimistAllowlist.COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY(), - val: bytes("true") - }); - vm.prank(sally_coinbaseQuestAttestor); - attestationStation.attest(attestationData); - } - - function inviteAndClaim(address claimer) internal { - address[] memory addresses = new address[](1); - addresses[0] = bob; - - vm.prank(alice_allowlistAttestor); - - // grant invites to Bob; - optimistInviter.setInviteCounts(addresses, 3); - - // issue a new invite - OptimistInviter.ClaimableInvite memory claimableInvite = - optimistInviterHelper.getClaimableInviteWithNewNonce(bob); - - // EIP-712 sign with Bob's private key - bytes memory signature = _getSignature(bobPrivateKey, optimistInviterHelper.getDigest(claimableInvite)); - - bytes32 hashedCommit = keccak256(abi.encode(claimer, signature)); - - // commit the invite - vm.prank(claimer); - optimistInviter.commitInvite(hashedCommit); - - // wait minimum commitment period - vm.warp(optimistInviter.MIN_COMMITMENT_PERIOD() + block.timestamp); - - // reveal and claim the invite - optimistInviter.claimInvite(claimer, claimableInvite, signature); - } - - /// @notice Get signature as a bytes blob, since SignatureChecker takes arbitrary signature blobs. 
- function _getSignature(uint256 _signingPrivateKey, bytes32 _digest) internal pure returns (bytes memory) { - (uint8 v, bytes32 r, bytes32 s) = vm.sign(_signingPrivateKey, _digest); - - bytes memory signature = abi.encodePacked(r, s, v); - return signature; - } - - function _initializeContracts() internal { - attestationStation = new AttestationStation(); - - optimistInviter = new OptimistInviter(alice_allowlistAttestor, attestationStation); - optimistInviter.initialize("OptimistInviter"); - - optimistAllowlist = new OptimistAllowlist( - attestationStation, alice_allowlistAttestor, sally_coinbaseQuestAttestor, address(optimistInviter) - ); - - optimistInviterHelper = new OptimistInviterHelper(optimistInviter, "OptimistInviter"); - } -} - -contract OptimistAllowlistTest is OptimistAllowlist_Initializer { - function test_constructor_succeeds() external view { - // expect attestationStation to be set - assertEq(address(optimistAllowlist.ATTESTATION_STATION()), address(attestationStation)); - assertEq(optimistAllowlist.ALLOWLIST_ATTESTOR(), alice_allowlistAttestor); - assertEq(optimistAllowlist.COINBASE_QUEST_ATTESTOR(), sally_coinbaseQuestAttestor); - assertEq(address(optimistAllowlist.OPTIMIST_INVITER()), address(optimistInviter)); - } - - /// @notice Base case, a account without any relevant attestations should not be able to mint. - function test_isAllowedToMint_withoutAnyAttestations_fails() external view { - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice After receiving a valid allowlist attestation, the account should be able to mint. - function test_isAllowedToMint_fromAllowlistAttestor_succeeds() external { - attestAllowlist(bob); - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice After receiving a valid attestation from the Coinbase Quest attestor, - /// the account should be able to mint. 
- function test_isAllowedToMint_fromCoinbaseQuestAttestor_succeeds() external { - attestCoinbaseQuest(bob); - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Account that received an attestation from the OptimistInviter contract by going - /// through the claim invite flow should be able to mint. - function test_isAllowedToMint_fromInvite_succeeds() external { - inviteAndClaim(bob); - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Attestation from the wrong allowlist attestor should not allow minting. - function test_isAllowedToMint_fromWrongAllowlistAttestor_fails() external { - // Ted is not the allowlist attestor - vm.prank(ted); - attestationStation.attest(bob, optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(), bytes("true")); - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Coinbase quest attestation from wrong attestor should not allow minting. - function test_isAllowedToMint_fromWrongCoinbaseQuestAttestor_fails() external { - // Ted is not the coinbase quest attestor - vm.prank(ted); - attestationStation.attest(bob, optimistAllowlist.COINBASE_QUEST_ELIGIBLE_ATTESTATION_KEY(), bytes("true")); - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Claiming an invite on the non-official OptimistInviter contract should not allow - /// minting. - function test_isAllowedToMint_fromWrongOptimistInviter_fails() external { - vm.prank(ted); - attestationStation.attest(bob, OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY, bytes("true")); - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Having multiple signals, even if one is invalid, should still allow minting. 
- function test_isAllowedToMint_withMultipleAttestations_succeeds() external { - attestAllowlist(bob); - attestCoinbaseQuest(bob); - inviteAndClaim(bob); - - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - - // A invalid attestation, as Ted is not allowlist attestor - vm.prank(ted); - attestationStation.attest(bob, optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(), bytes("true")); - - // Since Bob has at least one valid attestation, he should be allowed to mint - assertTrue(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Having falsy attestation value should not allow minting. - function test_isAllowedToMint_fromAllowlistAttestorWithFalsyValue_fails() external { - // First sends correct attestation - attestAllowlist(bob); - - bytes32 key = optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(); - vm.expectEmit(true, true, true, false); - emit AttestationCreated(alice_allowlistAttestor, bob, key, bytes("dsafsds")); - - // Invalidates existing attestation - vm.prank(alice_allowlistAttestor); - attestationStation.attest(bob, key, bytes("")); - - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } - - /// @notice Having falsy attestation value from Coinbase attestor should not allow minting. 
- function test_isAllowedToMint_fromCoinbaseQuestAttestorWithFalsyValue_fails() external { - // First sends correct attestation - attestAllowlist(bob); - - bytes32 key = optimistAllowlist.OPTIMIST_CAN_MINT_ATTESTATION_KEY(); - vm.expectEmit(true, true, true, true); - emit AttestationCreated(alice_allowlistAttestor, bob, key, bytes("")); - - // Invalidates existing attestation - vm.prank(alice_allowlistAttestor); - attestationStation.attest(bob, key, bytes("")); - - assertFalse(optimistAllowlist.isAllowedToMint(bob)); - } -} diff --git a/packages/contracts-bedrock/test/periphery/op-nft/OptimistInviter.t.sol b/packages/contracts-bedrock/test/periphery/op-nft/OptimistInviter.t.sol deleted file mode 100644 index 58e71a13a8d..00000000000 --- a/packages/contracts-bedrock/test/periphery/op-nft/OptimistInviter.t.sol +++ /dev/null @@ -1,529 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.15; - -// Testing utilities -import { Test } from "forge-std/Test.sol"; -import { AttestationStation } from "src/periphery/op-nft/AttestationStation.sol"; -import { OptimistInviter } from "src/periphery/op-nft/OptimistInviter.sol"; -import { Optimist } from "src/periphery/op-nft/Optimist.sol"; -import { TestERC1271Wallet } from "test/mocks/TestERC1271Wallet.sol"; -import { OptimistInviterHelper } from "test/mocks/OptimistInviterHelper.sol"; -import { OptimistConstants } from "src/periphery/op-nft/libraries/OptimistConstants.sol"; - -contract OptimistInviter_Initializer is Test { - event InviteClaimed(address indexed issuer, address indexed claimer); - event Initialized(uint8 version); - event Transfer(address indexed from, address indexed to, uint256 indexed tokenId); - event AttestationCreated(address indexed creator, address indexed about, bytes32 indexed key, bytes val); - - bytes32 EIP712_DOMAIN_TYPEHASH; - - address internal alice_inviteGranter; - address internal sally; - address internal ted; - address internal eve; - - address internal bob; - uint256 internal 
bobPrivateKey; - address internal carol; - uint256 internal carolPrivateKey; - - TestERC1271Wallet carolERC1271Wallet; - - AttestationStation attestationStation; - OptimistInviter optimistInviter; - - OptimistInviterHelper optimistInviterHelper; - - function setUp() public { - alice_inviteGranter = makeAddr("alice_inviteGranter"); - sally = makeAddr("sally"); - ted = makeAddr("ted"); - eve = makeAddr("eve"); - - bobPrivateKey = 0xB0B0B0B0; - bob = vm.addr(bobPrivateKey); - - carolPrivateKey = 0xC0C0C0C0; - carol = vm.addr(carolPrivateKey); - - carolERC1271Wallet = new TestERC1271Wallet(carol); - - // Give alice and bob and sally some ETH - vm.deal(alice_inviteGranter, 1 ether); - vm.deal(bob, 1 ether); - vm.deal(sally, 1 ether); - vm.deal(ted, 1 ether); - vm.deal(eve, 1 ether); - - EIP712_DOMAIN_TYPEHASH = - keccak256("EIP712Domain(string name,string version,uint256 chainId,address verifyingContract)"); - - _initializeContracts(); - } - - /// @notice Instantiates an AttestationStation, and an OptimistInviter. - function _initializeContracts() internal { - attestationStation = new AttestationStation(); - - optimistInviter = new OptimistInviter(alice_inviteGranter, attestationStation); - - vm.expectEmit(true, true, true, true, address(optimistInviter)); - emit Initialized(1); - optimistInviter.initialize("OptimistInviter"); - - optimistInviterHelper = new OptimistInviterHelper(optimistInviter, "OptimistInviter"); - } - - function _passMinCommitmentPeriod() internal { - vm.warp(optimistInviter.MIN_COMMITMENT_PERIOD() + block.timestamp); - } - - /// @notice Returns a user's current invite count, as stored in the AttestationStation. - function _getInviteCount(address _issuer) internal view returns (uint256) { - return optimistInviter.inviteCounts(_issuer); - } - - /// @notice Returns true if claimer has the proper attestation from OptimistInviter to mint. 
- function _hasMintAttestation(address _claimer) internal view returns (bool) { - bytes memory attestation = attestationStation.attestations( - address(optimistInviter), _claimer, OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY - ); - return attestation.length > 0; - } - - /// @notice Get signature as a bytes blob, since SignatureChecker takes arbitrary signature blobs. - function _getSignature(uint256 _signingPrivateKey, bytes32 _digest) internal pure returns (bytes memory) { - (uint8 v, bytes32 r, bytes32 s) = vm.sign(_signingPrivateKey, _digest); - - bytes memory signature = abi.encodePacked(r, s, v); - return signature; - } - - /// @notice Signs a claimable invite with the given private key and returns the signature using - /// correct EIP712 domain separator. - function _issueInviteAs(uint256 _privateKey) - internal - returns (OptimistInviter.ClaimableInvite memory, bytes memory) - { - return _issueInviteWithEIP712Domain( - _privateKey, - bytes("OptimistInviter"), - bytes(optimistInviter.EIP712_VERSION()), - block.chainid, - address(optimistInviter) - ); - } - - /// @notice Signs a claimable invite with the given private key and returns the signature using - /// the given EIP712 domain separator. This assumes that the issuer's address is the - /// corresponding public key to _issuerPrivateKey. 
- function _issueInviteWithEIP712Domain( - uint256 _issuerPrivateKey, - bytes memory _eip712Name, - bytes memory _eip712Version, - uint256 _eip712Chainid, - address _eip712VerifyingContract - ) - internal - returns (OptimistInviter.ClaimableInvite memory, bytes memory) - { - address issuer = vm.addr(_issuerPrivateKey); - OptimistInviter.ClaimableInvite memory claimableInvite = - optimistInviterHelper.getClaimableInviteWithNewNonce(issuer); - return ( - claimableInvite, - _getSignature( - _issuerPrivateKey, - optimistInviterHelper.getDigestWithEIP712Domain( - claimableInvite, _eip712Name, _eip712Version, _eip712Chainid, _eip712VerifyingContract - ) - ) - ); - } - - /// @notice Commits a signature and claimer address to the OptimistInviter contract. - function _commitInviteAs(address _as, bytes memory _signature) internal { - vm.prank(_as); - bytes32 hashedSignature = keccak256(abi.encode(_as, _signature)); - optimistInviter.commitInvite(hashedSignature); - - // Check that the commitment was stored correctly - assertEq(optimistInviter.commitmentTimestamps(hashedSignature), block.timestamp); - } - - /// @notice Signs a claimable invite with the given private key. The claimer commits then claims - /// the invite. Checks that all expected events are emitted and that state is updated - /// correctly. Returns the signature and invite for use in tests. 
- function _issueThenClaimShouldSucceed( - uint256 _issuerPrivateKey, - address _claimer - ) - internal - returns (OptimistInviter.ClaimableInvite memory, bytes memory) - { - address issuer = vm.addr(_issuerPrivateKey); - uint256 prevInviteCount = _getInviteCount(issuer); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = - _issueInviteAs(_issuerPrivateKey); - - _commitInviteAs(_claimer, signature); - - // The hash(claimer ++ signature) should be committed - assertEq(optimistInviter.commitmentTimestamps(keccak256(abi.encode(_claimer, signature))), block.timestamp); - - _passMinCommitmentPeriod(); - - // OptimistInviter should issue a new attestation allowing claimer to mint - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), - _claimer, - OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY, - abi.encode(issuer) - ); - - // Should emit an event indicating that the invite was claimed - vm.expectEmit(true, false, false, false, address(optimistInviter)); - emit InviteClaimed(issuer, _claimer); - - vm.prank(_claimer); - optimistInviter.claimInvite(_claimer, claimableInvite, signature); - - // The nonce that issuer used should be marked as used - assertTrue(optimistInviter.usedNonces(issuer, claimableInvite.nonce)); - - // Issuer should have one less invite - assertEq(prevInviteCount - 1, _getInviteCount(issuer)); - - // Claimer should have the mint attestation from the OptimistInviter contract - assertTrue(_hasMintAttestation(_claimer)); - - return (claimableInvite, signature); - } - - /// @notice Issues 3 invites to the given address. Checks that all expected events are emitted - /// and that state is updated correctly. 
- function _grantInvitesTo(address _to) internal { - address[] memory addresses = new address[](1); - addresses[0] = _to; - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), _to, optimistInviter.CAN_INVITE_ATTESTATION_KEY(), bytes("true") - ); - - vm.prank(alice_inviteGranter); - optimistInviter.setInviteCounts(addresses, 3); - - assertEq(_getInviteCount(_to), 3); - } -} - -contract OptimistInviterTest is OptimistInviter_Initializer { - function test_initialize_succeeds() external view { - // expect attestationStation to be set - assertEq(address(optimistInviter.ATTESTATION_STATION()), address(attestationStation)); - assertEq(optimistInviter.INVITE_GRANTER(), alice_inviteGranter); - } - - /// @notice Alice the admin should be able to give Bob, Sally, and Carol 3 invites, and the - /// OptimistInviter contract should increment invite counts on inviteCounts and issue - /// 'optimist.can-invite' attestations. - function test_grantInvites_adminAddingInvites_succeeds() external { - address[] memory addresses = new address[](3); - addresses[0] = bob; - addresses[1] = sally; - addresses[2] = address(carolERC1271Wallet); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), bob, optimistInviter.CAN_INVITE_ATTESTATION_KEY(), bytes("true") - ); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), sally, optimistInviter.CAN_INVITE_ATTESTATION_KEY(), bytes("true") - ); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), - address(carolERC1271Wallet), - optimistInviter.CAN_INVITE_ATTESTATION_KEY(), - bytes("true") - ); - - vm.prank(alice_inviteGranter); - optimistInviter.setInviteCounts(addresses, 3); - - assertEq(_getInviteCount(bob), 3); - assertEq(_getInviteCount(sally), 3); - 
assertEq(_getInviteCount(address(carolERC1271Wallet)), 3); - } - - /// @notice Bob, who is not the invite granter, should not be able to issue invites. - function test_grantInvites_nonAdminAddingInvites_reverts() external { - address[] memory addresses = new address[](2); - addresses[0] = bob; - addresses[1] = sally; - - vm.expectRevert("OptimistInviter: only invite granter can grant invites"); - vm.prank(bob); - optimistInviter.setInviteCounts(addresses, 3); - } - - /// @notice Sally should be able to commit an invite given by by Bob. - function test_commitInvite_committingForYourself_succeeds() external { - _grantInvitesTo(bob); - (, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - vm.prank(sally); - bytes32 hashedSignature = keccak256(abi.encode(sally, signature)); - optimistInviter.commitInvite(hashedSignature); - - assertEq(optimistInviter.commitmentTimestamps(hashedSignature), block.timestamp); - } - - /// @notice Sally should be able to Bob's for a different claimer, Eve. - function test_commitInvite_committingForSomeoneElse_succeeds() external { - _grantInvitesTo(bob); - (, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - vm.prank(sally); - bytes32 hashedSignature = keccak256(abi.encode(eve, signature)); - optimistInviter.commitInvite(hashedSignature); - - assertEq(optimistInviter.commitmentTimestamps(hashedSignature), block.timestamp); - } - - /// @notice Attempting to commit the same hash twice should revert. This prevents griefing. 
- function test_commitInvite_committingSameHashTwice_reverts() external { - _grantInvitesTo(bob); - (, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - vm.prank(sally); - bytes32 hashedSignature = keccak256(abi.encode(eve, signature)); - optimistInviter.commitInvite(hashedSignature); - - assertEq(optimistInviter.commitmentTimestamps(hashedSignature), block.timestamp); - - vm.expectRevert("OptimistInviter: commitment already made"); - optimistInviter.commitInvite(hashedSignature); - } - - /// @notice Bob issues signature, and Sally claims the invite. Bob's invite count should be - /// decremented, and Sally should be able to mint. - function test_claimInvite_succeeds() external { - _grantInvitesTo(bob); - _issueThenClaimShouldSucceed(bobPrivateKey, sally); - } - - /// @notice Bob issues signature, and Ted commits the invite for Sally. Eve claims for Sally. - function test_claimInvite_claimForSomeoneElse_succeeds() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - vm.prank(ted); - optimistInviter.commitInvite(keccak256(abi.encode(sally, signature))); - _passMinCommitmentPeriod(); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), - sally, - OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY, - abi.encode(bob) - ); - - // Should emit an event indicating that the invite was claimed - vm.expectEmit(true, true, true, true, address(optimistInviter)); - emit InviteClaimed(bob, sally); - - vm.prank(eve); - optimistInviter.claimInvite(sally, claimableInvite, signature); - - assertEq(_getInviteCount(bob), 2); - assertTrue(_hasMintAttestation(sally)); - assertFalse(_hasMintAttestation(eve)); - } - - function test_claimInvite_claimBeforeMinCommitmentPeriod_reverts() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory 
signature) = _issueInviteAs(bobPrivateKey); - - _commitInviteAs(sally, signature); - - // Some time passes, but not enough to meet the minimum commitment period - vm.warp(block.timestamp + 10); - - vm.expectRevert("OptimistInviter: minimum commitment period has not elapsed yet"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Signature issued for previous versions of the contract should fail. - function test_claimInvite_usingSignatureIssuedForDifferentVersion_reverts() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteWithEIP712Domain( - bobPrivateKey, "OptimismInviter", "0.9.1", block.chainid, address(optimistInviter) - ); - - _commitInviteAs(sally, signature); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: invalid signature"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Replay attack for signature issued for contract on different chain (ie. mainnet) - /// should fail. - function test_claimInvite_usingSignatureIssuedForDifferentChain_reverts() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteWithEIP712Domain( - bobPrivateKey, "OptimismInviter", bytes(optimistInviter.EIP712_VERSION()), 1, address(optimistInviter) - ); - - _commitInviteAs(sally, signature); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: invalid signature"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Replay attack for signature issued for instantiation of the OptimistInviter contract - /// on a different address should fail. 
- function test_claimInvite_usingSignatureIssuedForDifferentContract_reverts() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteWithEIP712Domain( - bobPrivateKey, "OptimismInviter", bytes(optimistInviter.EIP712_VERSION()), block.chainid, address(0xBEEF) - ); - - _commitInviteAs(sally, signature); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: invalid signature"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Attempting to claim again using the same signature again should fail. - function test_claimInvite_replayingUsedNonce_reverts() external { - _grantInvitesTo(bob); - - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = - _issueThenClaimShouldSucceed(bobPrivateKey, sally); - - // Sally tries to claim the invite using the same signature - vm.expectRevert("OptimistInviter: nonce has already been used"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - - // Carol tries to claim the invite using the same signature - _commitInviteAs(carol, signature); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: nonce has already been used"); - vm.prank(carol); - optimistInviter.claimInvite(carol, claimableInvite, signature); - } - - /// @notice Issuing signatures through a contract that implements ERC1271 should succeed (ie. - /// Gnosis Safe or other smart contract wallets). Carol is using a ERC1271 contract - /// wallet that is simply backed by her private key. 
- function test_claimInvite_usingERC1271Wallet_succeeds() external { - _grantInvitesTo(address(carolERC1271Wallet)); - - OptimistInviter.ClaimableInvite memory claimableInvite = - optimistInviterHelper.getClaimableInviteWithNewNonce(address(carolERC1271Wallet)); - - bytes memory signature = _getSignature(carolPrivateKey, optimistInviterHelper.getDigest(claimableInvite)); - - // Sally tries to claim the invite - _commitInviteAs(sally, signature); - _passMinCommitmentPeriod(); - - vm.expectEmit(true, true, true, true, address(attestationStation)); - emit AttestationCreated( - address(optimistInviter), - sally, - OptimistConstants.OPTIMIST_CAN_MINT_FROM_INVITE_ATTESTATION_KEY, - abi.encode(address(carolERC1271Wallet)) - ); - - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - assertEq(_getInviteCount(address(carolERC1271Wallet)), 2); - } - - /// @notice Claimer must commit the signature before claiming the invite. Sally attempts to - /// claim the Bob's invite without committing the signature first. - function test_claimInvite_withoutCommittingHash_reverts() external { - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - vm.expectRevert("OptimistInviter: claimer and signature have not been committed yet"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Using a signature that doesn't correspond to the claimable invite should fail. 
- function test_claimInvite_withIncorrectSignature_reverts() external { - _grantInvitesTo(carol); - _grantInvitesTo(bob); - (OptimistInviter.ClaimableInvite memory bobClaimableInvite, bytes memory bobSignature) = - _issueInviteAs(bobPrivateKey); - (, bytes memory carolSignature) = _issueInviteAs(carolPrivateKey); - - _commitInviteAs(sally, bobSignature); - _commitInviteAs(sally, carolSignature); - - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: invalid signature"); - vm.prank(sally); - optimistInviter.claimInvite(sally, bobClaimableInvite, carolSignature); - } - - /// @notice Attempting to use a signature from a issuer who never was granted invites should - /// fail. - function test_claimInvite_whenIssuerNeverReceivedInvites_reverts() external { - // Bob was never granted any invites, but issues an invite for Eve - (OptimistInviter.ClaimableInvite memory claimableInvite, bytes memory signature) = _issueInviteAs(bobPrivateKey); - - _commitInviteAs(sally, signature); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: issuer has no invites"); - vm.prank(sally); - optimistInviter.claimInvite(sally, claimableInvite, signature); - } - - /// @notice Attempting to use a signature from a issuer who has no more invites should fail. - /// Bob has 3 invites, but issues 4 invites for Sally, Carol, Ted, and Eve. Only the - /// first 3 invites should be claimable. The last claimer, Eve, should not be able to - /// claim the invite. 
- function test_claimInvite_whenIssuerHasNoInvitesLeft_reverts() external { - _grantInvitesTo(bob); - - _issueThenClaimShouldSucceed(bobPrivateKey, sally); - _issueThenClaimShouldSucceed(bobPrivateKey, carol); - _issueThenClaimShouldSucceed(bobPrivateKey, ted); - - assertEq(_getInviteCount(bob), 0); - - (OptimistInviter.ClaimableInvite memory claimableInvite4, bytes memory signature4) = - _issueInviteAs(bobPrivateKey); - - _commitInviteAs(eve, signature4); - _passMinCommitmentPeriod(); - - vm.expectRevert("OptimistInviter: issuer has no invites"); - vm.prank(eve); - optimistInviter.claimInvite(eve, claimableInvite4, signature4); - - assertEq(_getInviteCount(bob), 0); - } -} diff --git a/packages/contracts-bedrock/test/universal/SafeSend.t.sol b/packages/contracts-bedrock/test/universal/SafeSend.t.sol new file mode 100644 index 00000000000..9b2f930fd13 --- /dev/null +++ b/packages/contracts-bedrock/test/universal/SafeSend.t.sol @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +import { SafeSend } from "src/universal/SafeSend.sol"; +import { CommonTest } from "test/setup/CommonTest.sol"; + +contract SafeSendTest is CommonTest { + /// @notice Tests that sending to an EOA succeeds. + function test_send_toEOA_succeeds() public { + assertNotEq(alice, address(0)); + assertNotEq(bob, address(0)); + assertEq(bob.code.length, 0); + + vm.deal(alice, 100 ether); + + uint256 aliceBalanceBefore = alice.balance; + uint256 bobBalanceBefore = bob.balance; + + vm.prank(alice); + SafeSend safeSend = new SafeSend{ value: 100 ether }(payable(bob)); + + assertEq(address(safeSend).code.length, 0); + assertEq(address(safeSend).balance, 0); + assertEq(alice.balance, aliceBalanceBefore - 100 ether); + assertEq(bob.balance, bobBalanceBefore + 100 ether); + } + + /// @notice Tests that sending to a contract succeeds without executing the + /// contract's code. 
+ function test_send_toContract_succeeds() public { + // etch reverting code into bob + vm.etch(bob, hex"fe"); + vm.deal(alice, 100 ether); + + uint256 aliceBalanceBefore = alice.balance; + uint256 bobBalanceBefore = bob.balance; + + vm.prank(alice); + SafeSend safeSend = new SafeSend{ value: 100 ether }(payable(bob)); + + assertEq(address(safeSend).code.length, 0); + assertEq(address(safeSend).balance, 0); + assertEq(alice.balance, aliceBalanceBefore - 100 ether); + assertEq(bob.balance, bobBalanceBefore + 100 ether); + } +} diff --git a/packages/contracts-bedrock/test/universal/Specs.t.sol b/packages/contracts-bedrock/test/universal/Specs.t.sol index 5150b4d725d..49e35e835f3 100644 --- a/packages/contracts-bedrock/test/universal/Specs.t.sol +++ b/packages/contracts-bedrock/test/universal/Specs.t.sol @@ -16,6 +16,7 @@ import { IOptimismPortal } from "src/L1/interfaces/IOptimismPortal.sol"; import { IOptimismPortal2 } from "src/L1/interfaces/IOptimismPortal2.sol"; import { IOptimismPortalInterop } from "src/L1/interfaces/IOptimismPortalInterop.sol"; import { ISystemConfig } from "src/L1/interfaces/ISystemConfig.sol"; +import { ISystemConfigInterop } from "src/L1/interfaces/ISystemConfigInterop.sol"; import { IDataAvailabilityChallenge } from "src/L1/interfaces/IDataAvailabilityChallenge.sol"; import { IProtocolVersions } from "src/L1/interfaces/IProtocolVersions.sol"; @@ -106,14 +107,6 @@ contract Specification_Test is CommonTest { _addSpec({ _name: "DataAvailabilityChallenge", _sel: IDataAvailabilityChallenge.resolve.selector }); _addSpec({ _name: "DataAvailabilityChallenge", _sel: IDataAvailabilityChallenge.unlockBond.selector }); - // DelayedVetoable - _addSpec({ _name: "DelayedVetoable", _sel: _getSel("delay()") }); - _addSpec({ _name: "DelayedVetoable", _sel: _getSel("initiator()") }); - _addSpec({ _name: "DelayedVetoable", _sel: _getSel("queuedAt(bytes32)") }); - _addSpec({ _name: "DelayedVetoable", _sel: _getSel("target()") }); - _addSpec({ _name: 
"DelayedVetoable", _sel: _getSel("version()") }); - _addSpec({ _name: "DelayedVetoable", _sel: _getSel("vetoer()") }); - // L1CrossDomainMessenger _addSpec({ _name: "L1CrossDomainMessenger", _sel: _getSel("MESSAGE_VERSION()") }); _addSpec({ _name: "L1CrossDomainMessenger", _sel: _getSel("MIN_GAS_CALLDATA_OVERHEAD()") }); @@ -487,36 +480,37 @@ contract Specification_Test is CommonTest { _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("gasLimit()") }); _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("eip1559Denominator()") }); _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("eip1559Elasticity()") }); + _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfigInterop.initialize.selector }); _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfig.initialize.selector }); - _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfig.minimumGasLimit.selector }); + _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfigInterop.minimumGasLimit.selector }); _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("overhead()") }); _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("owner()") }); _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("renounceOwnership()"), _auth: Role.SYSTEMCONFIGOWNER }); - _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfig.resourceConfig.selector }); + _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfigInterop.resourceConfig.selector }); _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("scalar()") }); _addSpec({ _name: "SystemConfigInterop", - _sel: ISystemConfig.setBatcherHash.selector, + _sel: ISystemConfigInterop.setBatcherHash.selector, _auth: Role.SYSTEMCONFIGOWNER }); _addSpec({ _name: "SystemConfigInterop", - _sel: ISystemConfig.setGasConfig.selector, + _sel: ISystemConfigInterop.setGasConfig.selector, _auth: Role.SYSTEMCONFIGOWNER }); _addSpec({ _name: "SystemConfigInterop", - _sel: ISystemConfig.setGasLimit.selector, + _sel: ISystemConfigInterop.setGasLimit.selector, 
_auth: Role.SYSTEMCONFIGOWNER }); _addSpec({ _name: "SystemConfigInterop", - _sel: ISystemConfig.setEIP1559Params.selector, + _sel: ISystemConfigInterop.setEIP1559Params.selector, _auth: Role.SYSTEMCONFIGOWNER }); _addSpec({ _name: "SystemConfigInterop", - _sel: ISystemConfig.setUnsafeBlockSigner.selector, + _sel: ISystemConfigInterop.setUnsafeBlockSigner.selector, _auth: Role.SYSTEMCONFIGOWNER }); _addSpec({ @@ -524,7 +518,7 @@ contract Specification_Test is CommonTest { _sel: _getSel("transferOwnership(address)"), _auth: Role.SYSTEMCONFIGOWNER }); - _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfig.unsafeBlockSigner.selector }); + _addSpec({ _name: "SystemConfigInterop", _sel: ISystemConfigInterop.unsafeBlockSigner.selector }); _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("version()") }); _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("l1CrossDomainMessenger()") }); _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("l1ERC721Bridge()") }); @@ -560,12 +554,6 @@ contract Specification_Test is CommonTest { _auth: Role.DEPENDENCYMANAGER }); _addSpec({ _name: "SystemConfigInterop", _sel: _getSel("dependencyManager()") }); - _addSpec({ - _name: "SystemConfigInterop", - _sel: _getSel( - "initialize(address,uint32,uint32,bytes32,uint64,address,(uint32,uint8,uint8,uint32,uint32,uint128),address,(address,address,address,address,address,address,address),address)" - ) - }); // ProxyAdmin _addSpec({ _name: "ProxyAdmin", _sel: _getSel("addressManager()") }); @@ -849,27 +837,25 @@ contract Specification_Test is CommonTest { _addSpec({ _name: "OPContractsManager", _sel: _getSel("version()") }); _addSpec({ _name: "OPContractsManager", _sel: _getSel("superchainConfig()") }); _addSpec({ _name: "OPContractsManager", _sel: _getSel("protocolVersions()") }); - _addSpec({ _name: "OPContractsManager", _sel: _getSel("latestRelease()") }); - _addSpec({ _name: "OPContractsManager", _sel: _getSel("implementations(string,string)") }); + _addSpec({ _name: 
"OPContractsManager", _sel: _getSel("l1ContractsRelease()") }); _addSpec({ _name: "OPContractsManager", _sel: _getSel("systemConfigs(uint256)") }); _addSpec({ _name: "OPContractsManager", _sel: _getSel("OUTPUT_VERSION()") }); - _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.initialize.selector }); _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.deploy.selector }); _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.blueprints.selector }); _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.chainIdToBatchInboxAddress.selector }); + _addSpec({ _name: "OPContractsManager", _sel: OPContractsManager.implementations.selector }); // OPContractsManagerInterop _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("version()") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("superchainConfig()") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("protocolVersions()") }); - _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("latestRelease()") }); - _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("implementations(string,string)") }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("l1ContractsRelease()") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("systemConfigs(uint256)") }); _addSpec({ _name: "OPContractsManagerInterop", _sel: _getSel("OUTPUT_VERSION()") }); - _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.initialize.selector }); _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.deploy.selector }); _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.blueprints.selector }); _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.chainIdToBatchInboxAddress.selector }); + _addSpec({ _name: "OPContractsManagerInterop", _sel: OPContractsManager.implementations.selector }); // DeputyGuardianModule _addSpec({ diff --git a/versions.json 
b/versions.json index 5a2ae52b57f..e734259955f 100644 --- a/versions.json +++ b/versions.json @@ -10,5 +10,6 @@ "kontrol": "1.0.53", "just": "1.34.0", "binary_signer": "1.0.4", - "semgrep": "1.90.0" + "semgrep": "1.90.0", + "asterisc": "v1.1.2" }