diff --git a/cel2-migration-test/.envrc b/cel2-migration-test/.envrc new file mode 100644 index 0000000000000..761585366af68 --- /dev/null +++ b/cel2-migration-test/.envrc @@ -0,0 +1,30 @@ +export DEPLOYMENT_CONTEXT=cel2-migration +export IMPL_SALT=$(openssl rand -hex 32) + +export L1_RPC_URL="http://localhost:8545" +export L1_RPC_KIND=debug_geth + +export L2_RPC_URL="http://localhost:9545" + +export GS_ADMIN_ADDRESS="0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" +export GS_ADMIN_PRIVATE_KEY="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + +export GS_BATCHER_ADDRESS="0x70997970C51812dc3A010C7d01b50e0d17dc79C8" +export GS_BATCHER_PRIVATE_KEY="0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d" + +export GS_PROPOSER_ADDRESS="0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC" +export GS_PROPOSER_PRIVATE_KEY="0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a" + +export GS_SEQUENCER_ADDRESS="0x90F79bf6EB2c4f870365E785982E1f101E93b906" +export GS_SEQUENCER_PRIVATE_KEY="0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6" + +export DG_TYPE=0 +export PROPOSAL_INTERVAL="10s" + +if [ -f "config/deployment-l1.json" ]; then + # TODO(pl): FInd out why we cannot set both + # export L2OO_ADDRESS=$(cat config/deployment-l1.json | jq -r .L2OutputOracleProxy) + export DGF_ADDRESS=$(cat config/deployment-l1.json | jq -r .DisputeGameFactoryProxy) +fi + + diff --git a/cel2-migration-test/Dockerfile.l1 b/cel2-migration-test/Dockerfile.l1 new file mode 100644 index 0000000000000..995d15e57a395 --- /dev/null +++ b/cel2-migration-test/Dockerfile.l1 @@ -0,0 +1,9 @@ +FROM ethereum/client-go:v1.13.11 + +RUN apk add --no-cache jq bash + +COPY entrypoint-l1.sh /entrypoint.sh + +VOLUME ["/db"] + +ENTRYPOINT ["/bin/bash", "/entrypoint.sh"] diff --git a/cel2-migration-test/Dockerfile.l2 b/cel2-migration-test/Dockerfile.l2 new file mode 100644 index 0000000000000..1a639b3ceda16 --- /dev/null +++ b/cel2-migration-test/Dockerfile.l2 @@ -0,0 +1,11 @@ +# TODO: build arm64 images +# This currently requires op-geth from https://github.com/celo-org/op-geth/tree/alecps/piersy/data-migration +FROM --platform=linux/amd64 us-west1-docker.pkg.dev/blockchaintestsglobaltestnet/dev-images/op-geth:3d6a0e48e00137e581ee064db9cafa8300598771 + +RUN apk add --no-cache jq + +COPY entrypoint-l2.sh /entrypoint.sh + +# VOLUME ["/db"] + +ENTRYPOINT ["/bin/sh", "/entrypoint.sh"] diff --git a/cel2-migration-test/README.md b/cel2-migration-test/README.md new file mode 100644 index 0000000000000..6d31f51d8ca47 --- /dev/null +++ b/cel2-migration-test/README.md @@ -0,0 +1,82 @@ +# Migration testing setup + +This directory contains files for running a local L2 with the purpose of testing +the migration. Most services are run via docker compose, but there are some +scripts that need to be run manually (right now). + +In general services can be started by `docker compose up `. If you +want to start in a detached mode, add `-d` to the command. Logs can be watched +with `docker compose logs `. Add `-f` to follow them. + +All commands are expected to be run from inside this directory. + +## Walkthrough + +1. **Start the L1** + + The L1 is a `geth` client running with the clique consensus engine in the + `l1` service. Is uses the genesis file at `genesis-l1.json` to initialize the + chain and fund important accounts that will be used in later steps. + + Start the L1 by running `docker compose up l1`. This will initialize the + chain, the data dir is `data/l1`. + +1. 
**Deploy the OP L1 contracts** + + Now it is necessary to deploy the L1 contracts. This is done by the + `deploy-l1-contracts.sh` script. + + Start the script `./deploy-l1-contracts.sh`. This creates a config file + (`config/config.json`) and a file containing L1 addresses + (`deployment-l1.json`). + + Running the script overwrites prior deployments. If it fails without a + readable error message, it might be caused by re-using an old salt. In this + case you can create a new one with `direnv allow`. + +1. **Setup Celo datadir and run migration** + + First we need to build the migration tool. + + ```sh + cd ../op-chain-ops # Assuming you're in cel2-migration-test + make celo-migrate + ``` + + Now it possible to setup the migration of a Celo datadir. Copy the datadir + of the node you want into `data/l2` and name it `source`. This directory + will not be touched in the migration process. + + **Important**: Then update the `config/config.json` file under the + `l2ChainID` field with the chain id of the celo chain. + + Run the migration by executing `./migrate-state.sh`. It should finish with + the message "Finished migration successfully!" and a new directory `migrated` + in `data/l2`. Additionally two files will have been created: + + - `config/rollup-config.json` is the config required by `op-node`. + - `config/op-state-log.json` is purely informational and contains all state + that was written into the migrated state database. + +1. **Run `op-geth` on migrated state** + + Start `op-geth` with `docker compose up l2`. + + Make sure this prints the correct chain id and *Optimism* as the consensus + engine. Additionally, the merge should be configured and the *Cel2* hardfork + enabled. + + The execution client is now running and waiting for command from the + consensus client, `op-node`. + +1. **Run `op-node` on migrated state** + + Finally, we can start `op-node`: `docker compose up op-node`. + + This should show logs indicating that blocks are created on both the `l2` and + `op-node` services. + +1. **Run `op-batcher` and `op-proposer`** + + Run `docker compose up op-batcher op-proposer`. If this fails with unset + environment variables, reload the `.envrc` file with `direnv allow`. diff --git a/cel2-migration-test/config/config.sh b/cel2-migration-test/config/config.sh new file mode 100755 index 0000000000000..0f88ef4b1cd98 --- /dev/null +++ b/cel2-migration-test/config/config.sh @@ -0,0 +1,112 @@ +#!/usr/bin/env bash + +# This script is used to generate the getting-started.json configuration file +# used in the Getting Started quickstart guide on the docs site. Avoids the +# need to have the getting-started.json committed to the repo since it's an +# invalid JSON file when not filled in, which is annoying. 
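+# The values below target the local devnet in this directory: l1ChainID (900) matches
+# setup/genesis-l1.json, and the role addresses are read from the environment (.envrc).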
+ +reqenv() { + if [ -z "${!1}" ]; then + echo "Error: environment variable '$1' is undefined" + exit 1 + fi +} + +# Check required environment variables +reqenv "GS_ADMIN_ADDRESS" +reqenv "GS_BATCHER_ADDRESS" +reqenv "GS_PROPOSER_ADDRESS" +reqenv "GS_SEQUENCER_ADDRESS" +reqenv "L1_RPC_URL" + +# Get the finalized block timestamp and hash +block=$(cast block --rpc-url "$L1_RPC_URL") +timestamp=$(echo "$block" | awk '/timestamp/ { print $2 }') +blockhash=$(echo "$block" | awk '/hash/ { print $2 }') + +# Generate the config file +config=$(cat << EOL +{ + "l1StartingBlockTag": "$blockhash", + + "l1ChainID": 900, + "l2ChainID": 949000, + "l2BlockTime": 2, + "l1BlockTime": 12, + + "maxSequencerDrift": 600, + "sequencerWindowSize": 3600, + "channelTimeout": 300, + + "p2pSequencerAddress": "$GS_SEQUENCER_ADDRESS", + "batchInboxAddress": "0xff00000000000000000000000000000000042069", + "batchSenderAddress": "$GS_BATCHER_ADDRESS", + + "l2OutputOracleSubmissionInterval": 120, + "l2OutputOracleStartingBlockNumber": 0, + "l2OutputOracleStartingTimestamp": $timestamp, + + "l2OutputOracleProposer": "$GS_PROPOSER_ADDRESS", + "l2OutputOracleChallenger": "$GS_ADMIN_ADDRESS", + + "finalizationPeriodSeconds": 12, + + "proxyAdminOwner": "$GS_ADMIN_ADDRESS", + "baseFeeVaultRecipient": "$GS_ADMIN_ADDRESS", + "l1FeeVaultRecipient": "$GS_ADMIN_ADDRESS", + "sequencerFeeVaultRecipient": "$GS_ADMIN_ADDRESS", + "finalSystemOwner": "$GS_ADMIN_ADDRESS", + "superchainConfigGuardian": "$GS_ADMIN_ADDRESS", + + "baseFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", + "l1FeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", + "sequencerFeeVaultMinimumWithdrawalAmount": "0x8ac7230489e80000", + "baseFeeVaultWithdrawalNetwork": 0, + "l1FeeVaultWithdrawalNetwork": 0, + "sequencerFeeVaultWithdrawalNetwork": 0, + + "gasPriceOracleOverhead": 2100, + "gasPriceOracleScalar": 1000000, + + "enableGovernance": false, + "governanceTokenSymbol": "OP", + "governanceTokenName": "Optimism", + "governanceTokenOwner": "$GS_ADMIN_ADDRESS", + + "l2GenesisBlockGasLimit": "0x1c9c380", + "l2GenesisBlockBaseFeePerGas": "0x3b9aca00", + "l2GenesisRegolithTimeOffset": "0x0", + + "eip1559Denominator": 50, + "eip1559DenominatorCanyon": 250, + "eip1559Elasticity": 6, + + "l2GenesisDeltaTimeOffset": null, + "l2GenesisCanyonTimeOffset": "0x0", + + "systemConfigStartBlock": 0, + + "requiredProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recommendedProtocolVersion": "0x0000000000000000000000000000000000000000000000000000000000000000", + + "faultGameAbsolutePrestate": "0x03c7ae758795765c6664a5d39bf63841c71ff191e9189522bad8ebff5d4eca98", + "faultGameMaxDepth": 44, + "faultGameMaxDuration": 1200, + "faultGameGenesisBlock": 0, + "faultGameGenesisOutputRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "faultGameSplitDepth": 14, + + "preimageOracleMinProposalSize": 1800000, + "preimageOracleChallengePeriod": 86400, + "preimageOracleCancunActivationTimestamp": 0, + + "proofMaturityDelaySeconds": 12, + "disputeGameFinalityDelaySeconds": 6, + "respectedGameType": 0, + "useFaultProofs": false +} +EOL +) + +# Write the config file +echo "$config" > config/config.json diff --git a/cel2-migration-test/deploy-l1-contracts.sh b/cel2-migration-test/deploy-l1-contracts.sh new file mode 100755 index 0000000000000..3bdf631b71fa2 --- /dev/null +++ b/cel2-migration-test/deploy-l1-contracts.sh @@ -0,0 +1,21 @@ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set 
-o pipefail # don't hide errors within pipes + +# Create config file +./config/config.sh +cp config/config.json ../packages/contracts-bedrock/deploy-config/${DEPLOYMENT_CONTEXT}.json + +# Deploy CREATE2 contract +codesize=$(cast codesize 0x4e59b44847b379578588920cA78FbF26c0B4956C --rpc-url $L1_RPC_URL) +if [[ $codesize =~ 0 ]]; then + cast publish --rpc-url $L1_RPC_URL 0xf8a58085174876e800830186a08080b853604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf31ba02222222222222222222222222222222222222222222222222222222222222222a02222222222222222222222222222222222222222222222222222222222222222 +fi + +# Deploy OP contracts +pushd ../packages/contracts-bedrock +forge script scripts/Deploy.s.sol:Deploy --private-key $GS_ADMIN_PRIVATE_KEY --broadcast --rpc-url $L1_RPC_URL +popd + +# Copy deployment information +cp ../packages/contracts-bedrock/deployments/${DEPLOYMENT_CONTEXT}/.deploy config/deployment-l1.json diff --git a/cel2-migration-test/docker-compose.yml b/cel2-migration-test/docker-compose.yml new file mode 100644 index 0000000000000..552341e1be33a --- /dev/null +++ b/cel2-migration-test/docker-compose.yml @@ -0,0 +1,195 @@ +volumes: + op_log: + + +services: + op_stack_go_builder: # Not an actual service, but builds the prerequisite go images + build: + context: ../ + dockerfile: ops/docker/op-stack-go/Dockerfile + args: + GIT_COMMIT: "dev" + GIT_DATE: "0" + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet + entrypoint: ["echo", "build complete"] + + l1: + build: + context: . + dockerfile: Dockerfile.l1 + ports: + - "8545:8545" + - "8546:8546" + - "7060:6060" + volumes: + - "${PWD}/data/l1:/db" + - "${PWD}/setup/genesis-l1.json:/genesis.json" + - "${PWD}/setup/jwt-secret.txt:/config/test-jwt-secret.txt" + environment: + GETH_MINER_RECOMMIT: 100m + + l2: + build: + context: . 
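+      # Image is built from Dockerfile.l2, which currently needs the celo-org op-geth fork referenced there.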
+ dockerfile: Dockerfile.l2 + ports: + - "9545:8545" + - "8060:6060" + volumes: + - "${PWD}/data/l2/migrated:/db" + - "${PWD}/setup/jwt-secret.txt:/config/test-jwt-secret.txt" + entrypoint: # pass the L2 specific flags by overriding the entry-point and adding extra arguments + - "/bin/sh" + - "/entrypoint.sh" + - "--authrpc.jwtsecret=/config/test-jwt-secret.txt" + environment: + GETH_MINER_RECOMMIT: 100ms + + op-node: + depends_on: + # - op_stack_go_builder + - l1 + - l2 + build: + context: ../ + dockerfile: ./op-node/Dockerfile + args: + OP_STACK_GO_BUILDER: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:devnet + command: > + op-node + --l1=ws://l1:8546 + --l2=http://l2:8551 + --l2.jwt-secret=/config/test-jwt-secret.txt + --sequencer.enabled + --sequencer.l1-confs=0 + --verifier.l1-confs=0 + --p2p.sequencer.key=8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba + --rollup.config=/rollup.json + --rpc.addr=0.0.0.0 + --rpc.port=8545 + --p2p.listen.ip=0.0.0.0 + --p2p.listen.tcp=9003 + --p2p.listen.udp=9003 + --p2p.ban.peers=true + --snapshotlog.file=/op_log/snapshot.log + --p2p.priv.path=/config/p2p-node-key.txt + --metrics.enabled + --metrics.addr=0.0.0.0 + --metrics.port=7300 + --pprof.enabled + --rpc.enable-admin + --l1.trustrpc + ports: + - "7545:8545" + - "9003:9003" + - "7300:7300" + - "6060:6060" + volumes: + - "${PWD}/setup/p2p-sequencer-key.txt:/config/p2p-sequencer-key.txt" + - "${PWD}/setup/p2p-node-key.txt:/config/p2p-node-key.txt" + - "${PWD}/setup/jwt-secret.txt:/config/test-jwt-secret.txt" + - "${PWD}/config/rollup-config.json:/rollup.json" + - op_log:/op_log + + op-proposer: + depends_on: + - op_stack_go_builder + - l1 + - l2 + - op-node + build: + context: ../ + dockerfile: ./op-proposer/Dockerfile + args: + OP_STACK_GO_BUILDER: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:devnet + ports: + - "6062:6060" + - "7302:7300" + - "6546:8545" + environment: + OP_PROPOSER_L1_ETH_RPC: http://l1:8545 + OP_PROPOSER_ROLLUP_RPC: http://op-node:8545 + OP_PROPOSER_POLL_INTERVAL: 1s + OP_PROPOSER_NUM_CONFIRMATIONS: 1 + OP_PROPOSER_MNEMONIC: test test test test test test test test test test test junk + OP_PROPOSER_L2_OUTPUT_HD_PATH: "m/44'/60'/0'/0/1" + OP_PROPOSER_L2OO_ADDRESS: "${L2OO_ADDRESS}" + OP_PROPOSER_DGF_ADDRESS: "${DGF_ADDRESS}" + OP_PROPOSER_DG_TYPE: "${DG_TYPE}" + OP_PROPOSER_PROPOSAL_INTERVAL: "${PROPOSAL_INTERVAL}" + OP_PROPOSER_PPROF_ENABLED: "true" + OP_PROPOSER_METRICS_ENABLED: "true" + OP_PROPOSER_ALLOW_NON_FINALIZED: "true" + OP_PROPOSER_RPC_ENABLE_ADMIN: "true" + + op-batcher: + depends_on: + - op_stack_go_builder + - l1 + - l2 + - op-node + build: + context: ../ + dockerfile: ./op-batcher/Dockerfile + args: + OP_STACK_GO_BUILDER: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:devnet + ports: + - "6061:6060" + - "7301:7300" + - "6545:8545" + environment: + OP_BATCHER_L1_ETH_RPC: http://l1:8545 + OP_BATCHER_L2_ETH_RPC: http://l2:8545 + OP_BATCHER_ROLLUP_RPC: http://op-node:8545 + OP_BATCHER_MAX_CHANNEL_DURATION: 1 + OP_BATCHER_SUB_SAFETY_MARGIN: 4 # SWS is 15, ChannelTimeout is 40 + OP_BATCHER_POLL_INTERVAL: 1s + OP_BATCHER_NUM_CONFIRMATIONS: 1 + OP_BATCHER_MNEMONIC: test test test test test test test test test test test junk + OP_BATCHER_SEQUENCER_HD_PATH: "m/44'/60'/0'/0/2" + 
OP_BATCHER_PPROF_ENABLED: "true" + OP_BATCHER_METRICS_ENABLED: "true" + OP_BATCHER_RPC_ENABLE_ADMIN: "true" + OP_BATCHER_BATCH_TYPE: 0 + + # op-challenger: + # depends_on: + # - op_stack_go_builder + # - l1 + # - l2 + # - op-node + # build: + # context: ../ + # dockerfile: ./op-challenger/Dockerfile + # args: + # OP_STACK_GO_BUILDER: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:devnet + # image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger:devnet + # environment: + # OP_CHALLENGER_L1_ETH_RPC: http://l1:8545 + # OP_CHALLENGER_ROLLUP_RPC: http://op-node:8545 + # OP_CHALLENGER_TRACE_TYPE: cannon + # OP_CHALLENGER_GAME_FACTORY_ADDRESS: ${DGF_ADDRESS} + # OP_CHALLENGER_DATADIR: temp/challenger-data + # OP_CHALLENGER_CANNON_ROLLUP_CONFIG: ./.devnet/rollup.json + # OP_CHALLENGER_CANNON_L2_GENESIS: ./.devnet/genesis-l2.json + # OP_CHALLENGER_CANNON_BIN: ./cannon/bin/cannon + # OP_CHALLENGER_CANNON_SERVER: ./op-program/bin/op-program + # OP_CHALLENGER_CANNON_PRESTATE: ./op-program/bin/prestate.json + # OP_CHALLENGER_CANNON_L2: http://l2:8545 + # OP_CHALLENGER_MNEMONIC: test test test test test test test test test test test junk + # OP_CHALLENGER_HD_PATH: "m/44'/60'/0'/0/4" + # OP_CHALLENGER_NUM_CONFIRMATIONS: 1 + + # artifact-server: + # depends_on: + # - l1 + # image: nginx:1.25-alpine + # ports: + # - "8080:80" + # volumes: + # - "${PWD}/../.devnet/:/usr/share/nginx/html/:ro" + # security_opt: + # - "no-new-privileges:true" diff --git a/cel2-migration-test/entrypoint-l1.sh b/cel2-migration-test/entrypoint-l1.sh new file mode 100644 index 0000000000000..4d86e3fa5d14a --- /dev/null +++ b/cel2-migration-test/entrypoint-l1.sh @@ -0,0 +1,72 @@ +#!/bin/bash +set -exu + +VERBOSITY=${GETH_VERBOSITY:-3} +GETH_DATA_DIR=/db +GETH_CHAINDATA_DIR="$GETH_DATA_DIR/geth/chaindata" +GETH_KEYSTORE_DIR="$GETH_DATA_DIR/keystore" +GENESIS_FILE_PATH="${GENESIS_FILE_PATH:-/genesis.json}" +CHAIN_ID=$(cat "$GENESIS_FILE_PATH" | jq -r .config.chainId) +BLOCK_SIGNER_PRIVATE_KEY="ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" +BLOCK_SIGNER_ADDRESS="0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266" +RPC_PORT="${RPC_PORT:-8545}" +WS_PORT="${WS_PORT:-8546}" + +if [ ! -d "$GETH_KEYSTORE_DIR" ]; then + echo "$GETH_KEYSTORE_DIR missing, running account import" + echo -n "pwd" > "$GETH_DATA_DIR"/password + echo -n "$BLOCK_SIGNER_PRIVATE_KEY" | sed 's/0x//' > "$GETH_DATA_DIR"/block-signer-key + geth account import \ + --datadir="$GETH_DATA_DIR" \ + --password="$GETH_DATA_DIR"/password \ + "$GETH_DATA_DIR"/block-signer-key +else + echo "$GETH_KEYSTORE_DIR exists." +fi + +if [ ! -d "$GETH_CHAINDATA_DIR" ]; then + echo "$GETH_CHAINDATA_DIR missing, running init" + echo "Initializing genesis." + geth --verbosity="$VERBOSITY" init \ + --datadir="$GETH_DATA_DIR" \ + "$GENESIS_FILE_PATH" +else + echo "$GETH_CHAINDATA_DIR exists." +fi + +# Warning: Archive mode is required, otherwise old trie nodes will be +# pruned within minutes of starting the devnet. 
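+# The node below acts as the single clique signer for the devnet: the imported block-signer
+# account is unlocked for mining, and the Engine API is additionally served on port 8551.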
+ +exec geth \ + --datadir="$GETH_DATA_DIR" \ + --verbosity="$VERBOSITY" \ + --http \ + --http.corsdomain="*" \ + --http.vhosts="*" \ + --http.addr=0.0.0.0 \ + --http.port="$RPC_PORT" \ + --http.api=web3,debug,eth,txpool,net,engine \ + --ws \ + --ws.addr=0.0.0.0 \ + --ws.port="$WS_PORT" \ + --ws.origins="*" \ + --ws.api=debug,eth,txpool,net,engine \ + --syncmode=full \ + --nodiscover \ + --maxpeers=1 \ + --networkid="$CHAIN_ID" \ + --unlock="$BLOCK_SIGNER_ADDRESS" \ + --mine \ + --miner.etherbase="$BLOCK_SIGNER_ADDRESS" \ + --password="$GETH_DATA_DIR"/password \ + --allow-insecure-unlock \ + --rpc.allow-unprotected-txs \ + --authrpc.addr="0.0.0.0" \ + --authrpc.port="8551" \ + --authrpc.vhosts="*" \ + --authrpc.jwtsecret=/config/jwt-secret.txt \ + --gcmode=archive \ + --metrics \ + --metrics.addr=0.0.0.0 \ + --metrics.port=6060 \ + "$@" diff --git a/cel2-migration-test/entrypoint-l2.sh b/cel2-migration-test/entrypoint-l2.sh new file mode 100644 index 0000000000000..57d4c41c708ba --- /dev/null +++ b/cel2-migration-test/entrypoint-l2.sh @@ -0,0 +1,47 @@ +#!/bin/sh +set -exu + +VERBOSITY=${GETH_VERBOSITY:-3} +GETH_DATA_DIR=/db +GETH_CHAINDATA_DIR="$GETH_DATA_DIR/geth/chaindata" +RPC_PORT="${RPC_PORT:-8545}" +WS_PORT="${WS_PORT:-8546}" + +if [ ! -d "$GETH_CHAINDATA_DIR" ]; then + echo "$GETH_CHAINDATA_DIR missing, did you run migration?" +else + echo "$GETH_CHAINDATA_DIR exists." +fi + +# Warning: Archive mode is required, otherwise old trie nodes will be +# pruned within minutes of starting the devnet. + +exec geth \ + --datadir="$GETH_DATA_DIR" \ + --verbosity="$VERBOSITY" \ + --http \ + --http.corsdomain="*" \ + --http.vhosts="*" \ + --http.addr=0.0.0.0 \ + --http.port="$RPC_PORT" \ + --http.api=web3,debug,eth,txpool,net,engine \ + --ws \ + --ws.addr=0.0.0.0 \ + --ws.port="$WS_PORT" \ + --ws.origins="*" \ + --ws.api=debug,eth,txpool,net,engine \ + --syncmode=full \ + --nodiscover \ + --maxpeers=0 \ + --rpc.allow-unprotected-txs \ + --authrpc.addr="0.0.0.0" \ + --authrpc.port="8551" \ + --authrpc.vhosts="*" \ + --authrpc.jwtsecret=/config/jwt-secret.txt \ + --gcmode=archive \ + --metrics \ + --metrics.addr=0.0.0.0 \ + --metrics.port=6060 \ + "$@" + + # --networkid="$CHAIN_ID" \ diff --git a/cel2-migration-test/migrate-state.sh b/cel2-migration-test/migrate-state.sh new file mode 100755 index 0000000000000..de549420dd91a --- /dev/null +++ b/cel2-migration-test/migrate-state.sh @@ -0,0 +1,23 @@ +set -o errexit # abort on nonzero exitstatus +set -o nounset # abort on unbound variable +set -o pipefail # don't hide errors within pipes + +SOURCE_DIR="data/l2/source" +TARGET_DIR="data/l2/migrated" + +if [ -d "$TARGET_DIR" ]; then rm -Rf $TARGET_DIR; fi + +cp -r ${SOURCE_DIR} ${TARGET_DIR} + +MIGRATION_BIN="../op-chain-ops/bin/celo-migrate" + +${MIGRATION_BIN} \ + --deploy-config config/config.json \ + --l1-deployments config/deployment-l1.json \ + --l1-rpc http://localhost:8545 \ + --db-path ${TARGET_DIR} \ + --outfile.l2 config/op-state-log.json \ + --outfile.rollup config/rollup-config.json \ + +# Move datadir to the place where op-geth expects it +mv ${TARGET_DIR}/celo ${TARGET_DIR}/geth diff --git a/cel2-migration-test/setup/genesis-l1.json b/cel2-migration-test/setup/genesis-l1.json new file mode 100755 index 0000000000000..9741f7e04ddf4 --- /dev/null +++ b/cel2-migration-test/setup/genesis-l1.json @@ -0,0 +1,59 @@ +{ + "config": { + "chainId": 900, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + 
"petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "clique": { + "period": 12, + "epoch": 30000 + } + }, + "nonce": "0x0", + "timestamp": "0x65141956", + "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb922660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "gasLimit": "0x1c9c380", + "difficulty": "0x1", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "0000000000000000000000000000000000000000": { + "balance": "0x1" + }, + "f39fd6e51aad88f6f4ce6ab8827279cfffb92266": { + "balance": "0x200000000000000000000000000000000000000000000000000000000000000" + }, + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266": { + "balance": "0x200000000000000000000000000000000000000000000000000000000000000" + }, + "0x70997970C51812dc3A010C7d01b50e0d17dc79C8": { + "balance": "0x200000000000000000000000000000000000000000000000000000000000000" + }, + "0x90F79bf6EB2c4f870365E785982E1f101E93b906": { + "balance": "0x200000000000000000000000000000000000000000000000000000000000000" + }, + "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC": { + "balance": "0x200000000000000000000000000000000000000000000000000000000000000" + }, + "0x3fAB184622Dc19b6109349B94811493BF2a45362": { + "balance": "0x200000000000000000000000000000000000000000000000000000000000000" + } + }, + + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas": "0x3b9aca00", + "excessBlobGas": null, + "blobGasUsed": null +} diff --git a/cel2-migration-test/setup/jwt-secret.txt b/cel2-migration-test/setup/jwt-secret.txt new file mode 100644 index 0000000000000..aa3563e31685c --- /dev/null +++ b/cel2-migration-test/setup/jwt-secret.txt @@ -0,0 +1 @@ +f5c0218003b69623036ed32a2837e5e838f31e5f56c1af5b10abe99c5403ff9c diff --git a/cel2-migration-test/setup/p2p-node-key.txt b/cel2-migration-test/setup/p2p-node-key.txt new file mode 100644 index 0000000000000..890c029bcda7b --- /dev/null +++ b/cel2-migration-test/setup/p2p-node-key.txt @@ -0,0 +1 @@ +dae4671006c60a3619556ace98eca6f6e092948d05b13070a27ac492a4fba419 diff --git a/cel2-migration-test/setup/p2p-sequencer-key.txt b/cel2-migration-test/setup/p2p-sequencer-key.txt new file mode 100644 index 0000000000000..f1176f6df5ed5 --- /dev/null +++ b/cel2-migration-test/setup/p2p-sequencer-key.txt @@ -0,0 +1 @@ +0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6 diff --git a/op-chain-ops/Makefile b/op-chain-ops/Makefile index 1e39aca416949..69fba75999c6c 100644 --- a/op-chain-ops/Makefile +++ b/op-chain-ops/Makefile @@ -7,6 +7,9 @@ ecotone-scalar: receipt-reference-builder: go build -o ./bin/receipt-reference-builder ./cmd/receipt-reference-builder/*.go +celo-migrate: + go build -o ./bin/celo-migrate ./cmd/celo-migrate/*.go + test: go test ./... 
diff --git a/op-chain-ops/cmd/op-migrate/README.md b/op-chain-ops/cmd/celo-migrate/README.md similarity index 84% rename from op-chain-ops/cmd/op-migrate/README.md rename to op-chain-ops/cmd/celo-migrate/README.md index 700aab909a92f..9c0777cf47811 100644 --- a/op-chain-ops/cmd/op-migrate/README.md +++ b/op-chain-ops/cmd/celo-migrate/README.md @@ -1,4 +1,4 @@ -# State migrator +# Celo L1 -> Cel2 migrator This tool allows migrating the state of a Celo chain to a genesis block for a CeL2 chain. @@ -19,6 +19,12 @@ build/bin/mycelo load-bot tmp/testenv ``` To run the migration, run in `op-chain-ops` (set `CELO_DATADIR` if the `celo-blockchain` repo is not located at `~/celo-blockchain`): + ```sh make && ./migrate.sh -``` \ No newline at end of file +``` + +## Tasks + +- Import and change `BuildL2Genesis` + - Don't set balances for predeploys/precompiles diff --git a/op-chain-ops/cmd/celo-migrate/main.go b/op-chain-ops/cmd/celo-migrate/main.go new file mode 100644 index 0000000000000..208feeade5aa6 --- /dev/null +++ b/op-chain-ops/cmd/celo-migrate/main.go @@ -0,0 +1,459 @@ +package main + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/big" + "os" + "path/filepath" + "time" + + "github.com/ethereum-optimism/optimism/op-bindings/predeploys" + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" + "github.com/mattn/go-isatty" + + "github.com/urfave/cli/v2" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" +) + +var ( + deployConfigFlag = &cli.PathFlag{ + Name: "deploy-config", + Usage: "Path to deploy config file", + Required: true, + } + l1DeploymentsFlag = &cli.PathFlag{ + Name: "l1-deployments", + Usage: "Path to L1 deployments JSON file. Cannot be used with --deployment-dir", + } + l1RPCFlag = &cli.StringFlag{ + Name: "l1-rpc", + Usage: "RPC URL for an Ethereum L1 node. 
Cannot be used with --l1-starting-block", + } + outfileL2Flag = &cli.PathFlag{ + Name: "outfile.l2", + Usage: "Path to L2 genesis output file", + } + outfileRollupFlag = &cli.PathFlag{ + Name: "outfile.rollup", + Usage: "Path to rollup output file", + } + + dbPathFlag = &cli.StringFlag{ + Name: "db-path", + Usage: "Path to database", + Required: true, + } + dbCacheFlag = &cli.IntFlag{ + Name: "db-cache", + Usage: "LevelDB cache size in mb", + Value: 1024, + } + dbHandlesFlag = &cli.IntFlag{ + Name: "db-handles", + Usage: "LevelDB number of handles", + Value: 60, + } + dryRunFlag = &cli.BoolFlag{ + Name: "dry-run", + Usage: "Dry run the upgrade by not committing the database", + } + + flags = []cli.Flag{ + deployConfigFlag, + l1DeploymentsFlag, + l1RPCFlag, + outfileL2Flag, + outfileRollupFlag, + dbPathFlag, + dbCacheFlag, + dbHandlesFlag, + dryRunFlag, + } + + // from `packages/contracts-bedrock/deploy-config/internal-devnet.json` + EIP1559Denominator = uint64(50) // TODO(pl): select values + EIP1559DenominatorCanyon = uint64(250) // TODO(pl): select values + EIP1559Elasticity = uint64(10) +) + +func main() { + log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd())))) + + app := &cli.App{ + Name: "migrate", + Usage: "Migrate Celo state to a CeL2 DB", + Flags: flags, + Action: func(ctx *cli.Context) error { + deployConfig := ctx.Path("deploy-config") + if deployConfig == "" { + return fmt.Errorf("must specify --deploy-config") + } + log.Info("Deploy config", "path", deployConfig) + config, err := genesis.NewDeployConfig(deployConfig) + if err != nil { + return err + } + + // Try reading the L1 deployment information + l1Deployments := ctx.Path("l1-deployments") + if l1Deployments == "" { + return fmt.Errorf("must specify --l1-deployments") + } + deployments, err := genesis.NewL1Deployments(l1Deployments) + if err != nil { + return fmt.Errorf("cannot read L1 deployments at %s: %w", l1Deployments, err) + } + config.SetDeployments(deployments) + + // Get latest block information from L1 + l1RPC := ctx.String("l1-rpc") + if l1RPC == "" { + return fmt.Errorf("must specify --l1-rpc") + } + + outfileL2 := ctx.Path("outfile.l2") + if outfileL2 == "" { + return fmt.Errorf("must specify --outfile.l2") + } + + outfileRollup := ctx.Path("outfile.rollup") + if outfileRollup == "" { + return fmt.Errorf("must specify --outfile.rollup") + } + + var l1StartBlock *types.Block + client, err := ethclient.Dial(l1RPC) + if err != nil { + return fmt.Errorf("cannot dial %s: %w", l1RPC, err) + } + + if config.L1StartingBlockTag == nil { + l1StartBlock, err = client.BlockByNumber(context.Background(), nil) + if err != nil { + return fmt.Errorf("cannot fetch latest block: %w", err) + } + tag := rpc.BlockNumberOrHashWithHash(l1StartBlock.Hash(), true) + config.L1StartingBlockTag = (*genesis.MarshalableRPCBlockNumberOrHash)(&tag) + } else if config.L1StartingBlockTag.BlockHash != nil { + l1StartBlock, err = client.BlockByHash(context.Background(), *config.L1StartingBlockTag.BlockHash) + if err != nil { + return fmt.Errorf("cannot fetch block by hash: %w", err) + } + } else if config.L1StartingBlockTag.BlockNumber != nil { + l1StartBlock, err = client.BlockByNumber(context.Background(), big.NewInt(config.L1StartingBlockTag.BlockNumber.Int64())) + if err != nil { + return fmt.Errorf("cannot fetch block by number: %w", err) + } + } + + // Ensure that there is a starting L1 block + if l1StartBlock == nil { + return fmt.Errorf("no starting L1 block") + } + + // Sanity check 
the config. Do this after filling in the L1StartingBlockTag + // if it is not defined. + if err := config.Check(); err != nil { + return err + } + + log.Info("Using L1 Start Block", "number", l1StartBlock.Number(), "hash", l1StartBlock.Hash().Hex()) + + // Build the L2 genesis block + l2Genesis, err := genesis.BuildL2Genesis(config, l1StartBlock) + if err != nil { + return fmt.Errorf("error creating l2 genesis: %w", err) + } + + // Write changes to state to actual state database + dbPath := ctx.String("db-path") + if dbPath == "" { + return fmt.Errorf("must specify --db-path") + } + dbCache := ctx.Int("db-cache") + dbHandles := ctx.Int("db-handles") + dryRun := ctx.Bool("dry-run") + // TODO(pl): Move this into the function + log.Info("Opening database", "dbCache", dbCache, "dbHandles", dbHandles, "dbPath", dbPath) + ldb, err := openCeloDb(dbPath, dbCache, dbHandles) + if err != nil { + return fmt.Errorf("cannot open DB: %w", err) + } + log.Info("Loaded Celo L1 DB", "db", ldb) + + cel2Header, err := ApplyMigrationChangesToDB(ldb, l2Genesis, !dryRun) + if err != nil { + return err + } + + // Close the database handle + if err := ldb.Close(); err != nil { + return err + } + + log.Info("Updated Cel2 state") + + log.Info("Writing state diff", "file", outfileL2) + // Write genesis file to check created state + if err := jsonutil.WriteJSON(outfileL2, l2Genesis); err != nil { + return err + } + + rollupConfig, err := config.RollupConfig(l1StartBlock, cel2Header.Hash(), cel2Header.Number.Uint64()) + if err != nil { + return err + } + if err := rollupConfig.Check(); err != nil { + return fmt.Errorf("generated rollup config does not pass validation: %w", err) + } + + log.Info("Writing rollup config", "file", outfileRollup) + if err := jsonutil.WriteJSON(outfileRollup, rollupConfig); err != nil { + return err + } + + return nil + }, + } + + if err := app.Run(os.Args); err != nil { + log.Crit("error in migration", "err", err) + } + log.Info("Finished migration successfully!") +} + +func ApplyMigrationChangesToDB(ldb ethdb.Database, genesis *core.Genesis, commit bool) (*types.Header, error) { + log.Info("Migrating DB") + + // Grab the hash of the tip of the legacy chain. + hash := rawdb.ReadHeadHeaderHash(ldb) + log.Info("Reading chain tip from database", "hash", hash) + + // Grab the header number. + num := rawdb.ReadHeaderNumber(ldb, hash) + if num == nil { + return nil, fmt.Errorf("cannot find header number for %s", hash) + } + log.Info("Reading chain tip num from database", "number", num) + + // Grab the full header. + header := rawdb.ReadHeader(ldb, hash, *num) + // trieRoot := header.Root + log.Info("Read header from database", "number", header) + + // We need to update the chain config to set the correct hardforks. + genesisHash := rawdb.ReadCanonicalHash(ldb, 0) + cfg := rawdb.ReadChainConfig(ldb, genesisHash) + if cfg == nil { + log.Crit("chain config not found") + } + log.Info("Read config from database", "config", cfg) + + dbFactory := func() (*state.StateDB, error) { + // Set up the backing store. + underlyingDB := state.NewDatabaseWithConfig(ldb, &trie.Config{ + Preimages: true, + }) + + // Open up the state database. 
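+		// state.New layers a mutable StateDB on top of the legacy Celo state at the chain
+		// tip's root (header.Root), so the OP allocations below are applied to existing state.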
+ db, err := state.New(header.Root, underlyingDB, nil) + if err != nil { + return nil, fmt.Errorf("cannot open StateDB: %w", err) + } + + return db, nil + } + + db, err := dbFactory() + if err != nil { + return nil, fmt.Errorf("cannot create StateDB: %w", err) + } + + // So far we applied changes in the memory VM and collected changes in the genesis struct + // No we iterate through all accounts that have been written there and set them inside the statedb. + // This will change the state root + // Another property is that the total balance changes must be 0 + accountCounter := 0 + overwriteCounter := 0 + balanceDiff := big.NewInt(0) + for k, v := range genesis.Alloc { + accountCounter++ + if db.Exist(k) { + equal := bytes.Equal(db.GetCode(k), v.Code) + + log.Warn("Operating on existing state", "account", k, "equalCode", equal) + overwriteCounter++ + } + // TODO(pl): decide what to do with existing accounts. + db.CreateAccount(k) + + db.SetNonce(k, v.Nonce) + db.SetBalance(k, v.Balance) + db.SetCode(k, v.Code) + db.SetStorage(k, v.Storage) + + log.Info("Moved account", "address", k) + balanceDiff = balanceDiff.Add(balanceDiff, v.Balance) + } + log.Info("Migrated OP contracts into state DB", "copiedAccounts", accountCounter, "overwrittenAccounts", overwriteCounter) + if balanceDiff.Sign() != 0 { + log.Warn("Deploying OP contracts changed native balance", "diff", balanceDiff) + } + + // We're done messing around with the database, so we can now commit the changes to the DB. + // Note that this doesn't actually write the changes to disk. + log.Info("Committing state DB") + // TODO(pl): What block info to put here? + newRoot, err := db.Commit(1234, true) + if err != nil { + return nil, err + } + + log.Info("Creating new Genesis block") + // Create the header for the Bedrock transition block. + cel2Header := &types.Header{ + ParentHash: header.Hash(), + UncleHash: types.EmptyUncleHash, + Coinbase: predeploys.SequencerFeeVaultAddr, + Root: newRoot, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + Bloom: types.Bloom{}, + Difficulty: new(big.Int).Set(common.Big0), + Number: new(big.Int).Add(header.Number, common.Big1), + GasLimit: header.GasLimit, + GasUsed: 0, + Time: uint64(time.Now().Unix()), // TODO(pl): Needed to avoid L1-L2 time mismatches + Extra: []byte("CeL2 migration"), + MixDigest: common.Hash{}, + Nonce: types.BlockNonce{}, + BaseFee: new(big.Int).Set(header.BaseFee), + } + + // Create the Bedrock transition block from the header. Note that there are no transactions, + // uncle blocks, or receipts in the Bedrock transition block. + cel2Block := types.NewBlock(cel2Header, nil, nil, nil, trie.NewStackTrie(nil)) + + // We did it! + log.Info( + "Built Celo migration block", + "hash", cel2Block.Hash(), + "root", cel2Block.Root(), + "number", cel2Block.NumberU64(), + "gas-used", cel2Block.GasUsed(), + "gas-limit", cel2Block.GasLimit(), + ) + + log.Info("Header", "header", cel2Header) + + // If we're not actually writing this to disk, then we're done. + if !commit { + log.Info("Dry run complete") + return nil, nil + } + + // Otherwise we need to write the changes to disk. First we commit the state changes. + log.Info("Committing trie DB") + if err := db.Database().TrieDB().Commit(newRoot, true); err != nil { + return nil, err + } + + // Next we write the Cel2 genesis block to the database. 
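+	// Store the transition block, its total difficulty and (empty) receipts, then repoint
+	// the canonical hash and every head marker (block, fast-sync, header) at the new block.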
+ rawdb.WriteTd(ldb, cel2Block.Hash(), cel2Block.NumberU64(), cel2Block.Difficulty()) + rawdb.WriteBlock(ldb, cel2Block) + rawdb.WriteReceipts(ldb, cel2Block.Hash(), cel2Block.NumberU64(), nil) + rawdb.WriteCanonicalHash(ldb, cel2Block.Hash(), cel2Block.NumberU64()) + rawdb.WriteHeadBlockHash(ldb, cel2Block.Hash()) + rawdb.WriteHeadFastBlockHash(ldb, cel2Block.Hash()) + rawdb.WriteHeadHeaderHash(ldb, cel2Block.Hash()) + + // TODO(pl): What does finalized mean here? + // Make the first CeL2 block a finalized block. + rawdb.WriteFinalizedBlockHash(ldb, cel2Block.Hash()) + + // Set the standard options. + cfg.LondonBlock = cel2Block.Number() + cfg.BerlinBlock = cel2Block.Number() + cfg.ArrowGlacierBlock = cel2Block.Number() + cfg.GrayGlacierBlock = cel2Block.Number() + cfg.MergeNetsplitBlock = cel2Block.Number() + cfg.TerminalTotalDifficulty = big.NewInt(0) + cfg.TerminalTotalDifficultyPassed = true + + // Set the Optimism options. + cfg.BedrockBlock = cel2Block.Number() + // Enable Regolith from the start of Bedrock + cfg.RegolithTime = new(uint64) // what are those? do we need those? + cfg.Optimism = ¶ms.OptimismConfig{ + EIP1559Denominator: EIP1559Denominator, + EIP1559DenominatorCanyon: EIP1559DenominatorCanyon, + EIP1559Elasticity: EIP1559Elasticity, + } + cfg.CanyonTime = &cel2Header.Time + cfg.EcotoneTime = &cel2Header.Time + cfg.ShanghaiTime = &cel2Header.Time + cfg.Cel2Time = &cel2Header.Time + + log.Info("Write new config to database", "config", cfg) + + // Write the chain config to disk. + // TODO(pl): Why do we need to write this with the genesis hash, not `cel2Block.Hash()`?` + rawdb.WriteChainConfig(ldb, genesisHash, cfg) + + // Yay! + log.Info( + "Wrote chain config", + "1559-denominator", EIP1559Denominator, + "1559-denominator-canyon", EIP1559DenominatorCanyon, + "1559-elasticity", EIP1559Elasticity, + ) + + // We're done! 
+ log.Info( + "Wrote CeL2 transition block", + "height", cel2Header.Number, + "root", cel2Header.Root.String(), + "hash", cel2Header.Hash().String(), + "timestamp", cel2Header.Time, + ) + + return cel2Header, nil +} + +// Opens a Celo database, stored in the `celo` subfolder +func openCeloDb(path string, cache int, handles int) (ethdb.Database, error) { + if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) { + return nil, err + } + + chaindataPath := filepath.Join(path, "celo", "chaindata") + ancientPath := filepath.Join(chaindataPath, "ancient") + ldb, err := rawdb.Open(rawdb.OpenOptions{ + Type: "leveldb", + Directory: chaindataPath, + AncientsDirectory: ancientPath, + Namespace: "", + Cache: cache, + Handles: handles, + ReadOnly: false, + }) + if err != nil { + return nil, err + } + return ldb, nil +} diff --git a/op-chain-ops/cmd/op-migrate/main.go b/op-chain-ops/cmd/op-migrate/main.go deleted file mode 100644 index b1e189023802a..0000000000000 --- a/op-chain-ops/cmd/op-migrate/main.go +++ /dev/null @@ -1,312 +0,0 @@ -package main - -import ( - "fmt" - "math/big" - "os" - "path/filepath" - - "github.com/ethereum-optimism/optimism/op-bindings/predeploys" - "github.com/mattn/go-isatty" - - "github.com/urfave/cli/v2" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" -) - -// from `packages/contracts-bedrock/deploy-config/internal-devnet.json` -var ( - EIP1559Denominator = uint64(50) // TODO: what values - EIP1559Elasticity = uint64(10) -) - -func main() { - log.Root().SetHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(isatty.IsTerminal(os.Stderr.Fd())))) - - app := &cli.App{ - Name: "migrate", - Usage: "Migrate Celo state to a CeL2 genesis DB", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "db-path", - Usage: "Path to database", - Required: true, - }, - &cli.BoolFlag{ - Name: "dry-run", - Usage: "Dry run the upgrade by not committing the database", - }, - &cli.BoolFlag{ - Name: "no-check", - Usage: "Do not perform sanity checks. 
This should only be used for testing", - }, - &cli.IntFlag{ - Name: "db-cache", - Usage: "LevelDB cache size in mb", - Value: 1024, - }, - &cli.IntFlag{ - Name: "db-handles", - Usage: "LevelDB number of handles", - Value: 60, - }, - }, - Action: func(ctx *cli.Context) error { - dbCache := ctx.Int("db-cache") - dbHandles := ctx.Int("db-handles") - dbPath := ctx.String("db-path") - log.Info("Opening database", "dbCache", dbCache, "dbHandles", dbHandles, "dbPath", dbPath) - ldb, err := Open(dbPath, dbCache, dbHandles) - if err != nil { - return fmt.Errorf("cannot open DB: %w", err) - } - - dryRun := ctx.Bool("dry-run") - noCheck := ctx.Bool("no-check") - if noCheck { - panic("must run with check on") - } - - // Perform the migration - _, err = MigrateDB(ldb, !dryRun, noCheck) - if err != nil { - return err - } - - // Close the database handle - if err := ldb.Close(); err != nil { - return err - } - - log.Info("Finished migration successfully!") - - return nil - }, - } - - if err := app.Run(os.Args); err != nil { - log.Crit("error in migration", "err", err) - } -} - -type MigrationResult struct { - TransitionHeight uint64 - TransitionTimestamp uint64 - TransitionBlockHash common.Hash -} - -// MigrateDB will migrate a celo database to a new OP genesis block -func MigrateDB(ldb ethdb.Database, commit, noCheck bool) (*MigrationResult, error) { - log.Info("Migrating DB") - - // Grab the hash of the tip of the legacy chain. - hash := rawdb.ReadHeadHeaderHash(ldb) - log.Info("Reading chain tip from database", "hash", hash) - - // Grab the header number. - num := rawdb.ReadHeaderNumber(ldb, hash) - if num == nil { - return nil, fmt.Errorf("cannot find header number for %s", hash) - } - log.Info("Reading chain tip num from database", "number", num) - - // Grab the full header. - header := rawdb.ReadHeader(ldb, hash, *num) - trieRoot := header.Root - log.Info("Read header from database", "number", header) - - // We need to update the chain config to set the correct hardforks. - genesisHash := rawdb.ReadCanonicalHash(ldb, 0) - cfg := rawdb.ReadChainConfig(ldb, genesisHash) - if cfg == nil { - log.Crit("chain config not found") - } - log.Info("Read config from database", "config", cfg) - - // dbFactory := func() (*state.StateDB, error) { - // // Set up the backing store. - // underlyingDB := state.NewDatabaseWithConfig(ldb, &trie.Config{ - // Preimages: true, - // Cache: 1024, - // }) - - // // Open up the state database. - // db, err := state.New(header.Root, underlyingDB, nil) - // if err != nil { - // return nil, fmt.Errorf("cannot open StateDB: %w", err) - // } - - // return db, nil - // } - - // db, err := dbFactory() - // if err != nil { - // return nil, fmt.Errorf("cannot create StateDB: %w", err) - // } - - // Remove old blocks, so that we start with a fresh genesis block - currentHash := header.ParentHash - for { - // There are no uncles in Celo - num = rawdb.ReadHeaderNumber(ldb, currentHash) - hash = rawdb.ReadCanonicalHash(ldb, *num) - - log.Info("Deleting block", "hash", currentHash, "c", hash, "number", *num) - if commit { - rawdb.DeleteBlock(ldb, currentHash, *num) - } - if *num == 0 { - break - } - - header = rawdb.ReadHeader(ldb, currentHash, *num) - if header == nil { - return nil, fmt.Errorf("couldn't find header") - } - currentHash = header.ParentHash - } - - log.Info("Successfully cleaned old blocks") - - // We're done messing around with the database, so we can now commit the changes to the DB. - // Note that this doesn't actually write the changes to disk. 
- // log.Info("Committing state DB") - // newRoot, err := db.Commit(true) - // if err != nil { - // return nil, err - // } - - log.Info("Creating new Genesis block") - // Create the header for the Bedrock transition block. - cel2Header := &types.Header{ - ParentHash: common.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - UncleHash: types.EmptyUncleHash, - Coinbase: predeploys.SequencerFeeVaultAddr, // TODO - Root: trieRoot, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, - Bloom: types.Bloom{}, - Difficulty: common.Big0, - Number: common.Big0, - GasLimit: (uint64)(20_000_000), - GasUsed: 0, - Time: uint64(12345), - Extra: []byte("CeL2"), - MixDigest: common.Hash{}, - Nonce: types.BlockNonce{}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - - // Create the Bedrock transition block from the header. Note that there are no transactions, - // uncle blocks, or receipts in the Bedrock transition block. - cel2Block := types.NewBlock(cel2Header, nil, nil, nil, trie.NewStackTrie(nil)) - - // We did it! - log.Info( - "Built Bedrock transition", - "hash", cel2Block.Hash(), - "root", cel2Block.Root(), - "number", cel2Block.NumberU64(), - "gas-used", cel2Block.GasUsed(), - "gas-limit", cel2Block.GasLimit(), - ) - - log.Info("Header", "header", cel2Header) - log.Info("Body", "Body", cel2Block) - - // Create the result of the migration. - res := &MigrationResult{ - TransitionHeight: cel2Block.NumberU64(), - TransitionTimestamp: cel2Block.Time(), - TransitionBlockHash: cel2Block.Hash(), - } - - // If we're not actually writing this to disk, then we're done. - if !commit { - log.Info("Dry run complete") - return res, nil - } - - // Otherwise we need to write the changes to disk. First we commit the state changes. - // log.Info("Committing trie DB") - // if err := db.Database().TrieDB().Commit(newRoot, true); err != nil { - // return nil, err - // } - - // Next we write the Cel2 genesis block to the database. - rawdb.WriteTd(ldb, cel2Block.Hash(), cel2Block.NumberU64(), cel2Block.Difficulty()) - rawdb.WriteBlock(ldb, cel2Block) - rawdb.WriteReceipts(ldb, cel2Block.Hash(), cel2Block.NumberU64(), nil) - rawdb.WriteCanonicalHash(ldb, cel2Block.Hash(), cel2Block.NumberU64()) - rawdb.WriteHeadBlockHash(ldb, cel2Block.Hash()) - rawdb.WriteHeadFastBlockHash(ldb, cel2Block.Hash()) - rawdb.WriteHeadHeaderHash(ldb, cel2Block.Hash()) - - // TODO - // Make the first CeL2 block a finalized block. - rawdb.WriteFinalizedBlockHash(ldb, cel2Block.Hash()) - - // Set the standard options. - // TODO: What about earlier hardforks, e.g. does berlin have to be enabled as it never was on Celo? - cfg.LondonBlock = cel2Block.Number() - cfg.ArrowGlacierBlock = cel2Block.Number() - cfg.GrayGlacierBlock = cel2Block.Number() - cfg.MergeNetsplitBlock = cel2Block.Number() - cfg.TerminalTotalDifficulty = big.NewInt(0) - cfg.TerminalTotalDifficultyPassed = true - - // Set the Optimism options. - cfg.BedrockBlock = cel2Block.Number() - // Enable Regolith from the start of Bedrock - cfg.RegolithTime = new(uint64) // what are those? do we need those? - cfg.Optimism = ¶ms.OptimismConfig{ - EIP1559Denominator: EIP1559Denominator, - EIP1559Elasticity: EIP1559Elasticity, - } - - // Write the chain config to disk. - rawdb.WriteChainConfig(ldb, cel2Block.Hash(), cfg) - - // Yay! - log.Info( - "Wrote chain config", - "1559-denominator", EIP1559Denominator, - "1559-elasticity", EIP1559Elasticity, - ) - - // We're done! 
- log.Info( - "Wrote CeL2 transition block", - "height", cel2Header.Number, - "root", cel2Header.Root.String(), - "hash", cel2Header.Hash().String(), - "timestamp", cel2Header.Time, - ) - - // Return the result and have a nice day. - return res, nil -} - -func Open(path string, cache int, handles int) (ethdb.Database, error) { - chaindataPath := filepath.Join(path, "celo", "chaindata") - ancientPath := filepath.Join(chaindataPath, "ancient") - ldb, err := rawdb.Open(rawdb.OpenOptions{ - Type: "leveldb", - Directory: chaindataPath, - AncientsDirectory: ancientPath, - Namespace: "", - Cache: cache, - Handles: handles, - ReadOnly: false, - }) - if err != nil { - return nil, err - } - return ldb, nil -} diff --git a/op-chain-ops/genesis/layer_two.go b/op-chain-ops/genesis/layer_two.go index 3b2bf85e1a3ba..9692e3a2e51e8 100644 --- a/op-chain-ops/genesis/layer_two.go +++ b/op-chain-ops/genesis/layer_two.go @@ -122,6 +122,8 @@ func BuildL2Genesis(config *DeployConfig, l1StartBlock *types.Block) (*core.Gene } func PerformUpgradeTxs(db *state.MemoryStateDB) error { + log.Info("Running Ecotone upgrade transactions") + // Only the Ecotone upgrade is performed with upgrade-txs. if !db.Genesis().Config.IsEcotone(db.Genesis().Timestamp) { return nil diff --git a/op-chain-ops/squash/sim.go b/op-chain-ops/squash/sim.go index 1d66bc135e06a..808ab83d1c10f 100644 --- a/op-chain-ops/squash/sim.go +++ b/op-chain-ops/squash/sim.go @@ -165,7 +165,7 @@ func (sim *SquashSim) AddUpgradeTxs(txs []hexutil.Bytes) error { return fmt.Errorf("failed to turn upgrade tx %d into message: %w", i, err) } if !msg.IsDepositTx { - return fmt.Errorf("upgrade tx %d is not a depost", i) + return fmt.Errorf("upgrade tx %d is not a deposit", i) } if res, err := sim.AddMessage(msg); err != nil { return fmt.Errorf("invalid upgrade tx %d, EVM invocation failed: %w", i, err)
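As a final, optional sanity check (not part of the scripts in this change), the migrated node started with `docker compose up l2` can be probed over the RPC port mapped in `docker-compose.yml`; a minimal sketch using `cast`, assuming the default `9545:8545` mapping:

```sh
# Should print the l2ChainID that was patched into config/config.json before the migration.
cast chain-id --rpc-url http://localhost:9545

# The head block should be the CeL2 transition block: one block above the legacy Celo tip,
# difficulty 0, and "CeL2 migration" as its extra data.
cast block latest --rpc-url http://localhost:9545
```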