diff --git a/.github/workflows/docker-build-scan.yaml b/.github/workflows/docker-build-scan.yaml
index 30ad5196c7fec..631cdb37b0c87 100644
--- a/.github/workflows/docker-build-scan.yaml
+++ b/.github/workflows/docker-build-scan.yaml
@@ -1,92 +1,50 @@
 name: Docker Build Scan
 on:
+  pull_request:
+    branches:
+      - "master"
+      - "celo*"
   workflow_dispatch:
 
 jobs:
-  Build-Scan-Container-op-ufm:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: op-ufm/Dockerfile
-
-  Build-Scan-Container-ops-bedrock-l1:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: ops-bedrock/Dockerfile.l1
-      context: ops-bedrock
-
-  Build-Scan-Container-ops-bedrock-l2:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: ops-bedrock/Dockerfile.l2
-      context: ops-bedrock
-
-  Build-Scan-Container-indexer:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: indexer/Dockerfile
-
-  Build-Scan-Container-op-heartbeat:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: op-heartbeat/Dockerfile
-
-  Build-Scan-Container-op-exporter:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: op-exporter/Dockerfile
-
-  Build-Scan-Container-op-program:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: op-program/Dockerfile
-
-  Build-Scan-Container-ops-bedrock:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: ops-bedrock/Dockerfile.stateviz
-
-  Build-Scan-Container-ci-builder:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: ops/docker/ci-builder/Dockerfile
-
-  Build-Scan-Container-proxyd:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: proxyd/Dockerfile
-
-  Build-Scan-Container-op-node:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: op-node/Dockerfile
-
-  Build-Scan-Container-op-batcher:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: op-batcher/Dockerfile
-
-  Build-Scan-Container-indexer-ui:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: indexer/ui/Dockerfile
-
-  Build-Scan-Container-op-proposer:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: op-proposer/Dockerfile
-
-  Build-Scan-Container-op-challenger:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: op-challenger/Dockerfile
-
-  Build-Scan-Container-endpoint-monitor:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: endpoint-monitor/Dockerfile
-
-  Build-Scan-Container-opwheel:
-    uses: celo-org/reusable-workflows/.github/workflows/container-cicd-local.yaml@v1.11.2
-    with:
-      dockerfile: op-wheel/Dockerfile
-
+  detect-files-changed:
+    runs-on: ubuntu-latest
+    outputs:
+      files-changed: ${{ steps.detect-files-changed.outputs.all_changed_files }}
+    steps:
+      - uses: actions/checkout@v4
+      - name: Detect files changed
+        id: detect-files-changed
+        uses: tj-actions/changed-files@v44
+        with:
+          separator: ','
+
+  build-cel2-migration-tool:
+    runs-on: ubuntu-latest
+    needs: detect-files-changed
+    if: |
+      contains(needs.detect-files-changed.outputs.files-changed, 'op-chain-ops/cmd/celo-dbmigrate') ||
+      contains(needs.detect-files-changed.outputs.files-changed, 'op-chain-ops/cmd/celo-migrate') ||
+      contains(needs.detect-files-changed.outputs.files-changed, 'op-chain-ops/Dockerfile')
+    permissions:
+      contents: read
+      id-token: write
+      security-events: write
+    steps:
+      - uses: actions/checkout@v4
+      - name: Login at GCP Artifact Registry
+        uses: celo-org/reusable-workflows/.github/actions/auth-gcp-artifact-registry@v2.0
+        with:
+          workload-id-provider: 'projects/1094498259535/locations/global/workloadIdentityPools/gh-optimism/providers/github-by-repos'
+          service-account: 'celo-optimism-gh@devopsre.iam.gserviceaccount.com'
+          docker-gcp-registries: us-west1-docker.pkg.dev
+      - name: Build and push container
+        uses: celo-org/reusable-workflows/.github/actions/build-container@v2.0
+        with:
+          platforms: linux/amd64
+          registry: us-west1-docker.pkg.dev/devopsre/dev-images/cel2-migration-tool
+          tags: ${{ github.sha }}
+          context: ./
+          dockerfile: ./op-chain-ops/Dockerfile
+          push: true
+          trivy: false
diff --git a/go.mod b/go.mod
index dd2fa5bc3dc76..708213d2a2f24 100644
--- a/go.mod
+++ b/go.mod
@@ -226,7 +226,8 @@ require (
 	rsc.io/tmplfunc v0.0.3 // indirect
 )
 
-replace github.com/ethereum/go-ethereum => github.com/celo-org/op-geth v0.0.0-20240612131021-96a2a76ceaaa
+// replace github.com/ethereum/go-ethereum => github.com/celo-org/op-geth v0.0.0-20240612131021-96a2a76ceaaa
+replace github.com/ethereum/go-ethereum => github.com/celo-org/op-geth v0.0.0-20240612164035-ef9b9a19f53e
 
 //replace github.com/ethereum/go-ethereum v1.13.9 => ../op-geth
 
diff --git a/go.sum b/go.sum
index 45a9ca6301aaf..07804c7711ac2 100644
--- a/go.sum
+++ b/go.sum
@@ -77,8 +77,8 @@ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg
 github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
 github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
 github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/celo-org/op-geth v0.0.0-20240612131021-96a2a76ceaaa h1:a9OVs0F3EjWwePKDuDUPr0QWe/CWi7uwElH9JzUxq/8=
-github.com/celo-org/op-geth v0.0.0-20240612131021-96a2a76ceaaa/go.mod h1:vObZmT4rKd8hjSblIktsJHtLX8SXbCoaIXEd42HMDB0=
+github.com/celo-org/op-geth v0.0.0-20240612164035-ef9b9a19f53e h1:7KzqBAm9YmhrdtUG4dssU77za1ytBRuLIoqnzzJoBw8=
+github.com/celo-org/op-geth v0.0.0-20240612164035-ef9b9a19f53e/go.mod h1:vObZmT4rKd8hjSblIktsJHtLX8SXbCoaIXEd42HMDB0=
 github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
 github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
diff --git a/op-chain-ops/Dockerfile b/op-chain-ops/Dockerfile
new file mode 100644
index 0000000000000..7ab849471454d
--- /dev/null
+++ b/op-chain-ops/Dockerfile
@@ -0,0 +1,30 @@
+FROM golang:1.21.1-alpine3.18 as builder
+
+RUN apk --no-cache add make
+
+COPY ./go.mod /app/go.mod
+COPY ./go.sum /app/go.sum
+
+WORKDIR /app
+
+RUN go mod download
+
+COPY ./op-bindings /app/op-bindings
+COPY ./op-service /app/op-service
+COPY ./op-node /app/op-node
+COPY ./op-chain-ops /app/op-chain-ops
+WORKDIR /app/op-chain-ops
+RUN make celo-dbmigrate celo-migrate
+
+FROM alpine:3.18
+RUN apk --no-cache add ca-certificates bash rsync
+
+# RUN addgroup -S app && adduser -S app -G app
+# USER app
+WORKDIR /app
+
+COPY --from=builder /app/op-chain-ops/bin/celo-dbmigrate /app
+COPY --from=builder /app/op-chain-ops/bin/celo-migrate /app
+ENV PATH="/app:${PATH}"
+
+ENTRYPOINT ["/app/celo-dbmigrate"]
diff --git a/op-chain-ops/Makefile b/op-chain-ops/Makefile
index 9b548b0fd6b32..eeda9b0b597d9 100644
--- a/op-chain-ops/Makefile
+++ b/op-chain-ops/Makefile
@@ -15,6 +15,9 @@ receipt-reference-builder:
 op-upgrade:
 	go build -o ./bin/op-upgrade ./cmd/op-upgrade/main.go
 
+celo-dbmigrate:
+	go build -o ./bin/celo-dbmigrate ./cmd/celo-dbmigrate/*.go
+
 celo-migrate:
 	go build -o ./bin/celo-migrate ./cmd/celo-migrate/*.go
 
diff --git a/op-chain-ops/cmd/celo-dbmigrate/ancients.go b/op-chain-ops/cmd/celo-dbmigrate/ancients.go
new file mode 100644
index 0000000000000..9d3afd147385a
--- /dev/null
+++ b/op-chain-ops/cmd/celo-dbmigrate/ancients.go
@@ -0,0 +1,191 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/log"
+	"golang.org/x/sync/errgroup"
+)
+
+// RLPBlockRange is a range of blocks in RLP format
+type RLPBlockRange struct {
+	start    uint64
+	hashes   [][]byte
+	headers  [][]byte
+	bodies   [][]byte
+	receipts [][]byte
+	tds      [][]byte
+}
+
+func migrateAncientsDb(oldDBPath, newDBPath string, batchSize uint64) (uint64, error) {
+	oldFreezer, err := rawdb.NewChainFreezer(filepath.Join(oldDBPath, "ancient"), "", false) // TODO can't be readonly because we need the .meta files to be created
+	if err != nil {
+		return 0, fmt.Errorf("failed to open old freezer: %w", err)
+	}
+	defer oldFreezer.Close()
+
+	newFreezer, err := rawdb.NewChainFreezer(filepath.Join(newDBPath, "ancient"), "", false)
+	if err != nil {
+		return 0, fmt.Errorf("failed to open new freezer: %w", err)
+	}
+	defer newFreezer.Close()
+
+	numAncientsOld, err := oldFreezer.Ancients()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get number of ancients in old freezer: %w", err)
+	}
+
+	numAncientsNew, err := newFreezer.Ancients()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get number of ancients in new freezer: %w", err)
+	}
+
+	log.Info("Migration Started", "process", "ancients migration", "startBlock", numAncientsNew, "endBlock", numAncientsOld, "count", numAncientsOld-numAncientsNew)
+	g, ctx := errgroup.WithContext(context.Background())
+	readChan := make(chan RLPBlockRange, 10)
+	transformChan := make(chan RLPBlockRange, 10)
+
+	log.Info("Migrating data", "start", numAncientsNew, "end", numAncientsOld, "step", batchSize)
+
+	g.Go(func() error {
+		return readAncientBlocks(ctx, oldFreezer, numAncientsNew, numAncientsOld, batchSize, readChan)
+	})
+	g.Go(func() error { return transformBlocks(ctx, readChan, transformChan) })
+	g.Go(func() error { return writeAncientBlocks(ctx, newFreezer, transformChan) })
+
+	if err = g.Wait(); err != nil {
+		return 0, fmt.Errorf("failed to migrate ancients: %w", err)
+	}
+
+	numAncientsNew, err = newFreezer.Ancients()
+	if err != nil {
+		return 0, fmt.Errorf("failed to get number of ancients in new freezer: %w", err)
+	}
+
+	log.Info("Migration End", "process", "ancients migration", "totalBlocks", numAncientsNew)
+	return numAncientsNew, nil
+}
+
+func readAncientBlocks(ctx context.Context, freezer *rawdb.Freezer, startBlock, endBlock, batchSize uint64, out chan<- RLPBlockRange) error {
+	defer close(out)
+
+	for i := startBlock; i < endBlock; i += batchSize {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+			count := min(batchSize, endBlock-i)
+			start := i
+
+			blockRange := RLPBlockRange{
+				start:    start,
+				hashes:   make([][]byte, count),
+				headers:  make([][]byte, count),
+				bodies:   make([][]byte, count),
+				receipts: make([][]byte, count),
+				tds:      make([][]byte, count),
+			}
+			var err error
+
+			blockRange.hashes, err = freezer.AncientRange(rawdb.ChainFreezerHashTable, start, count, 0)
+			if err != nil {
+				return fmt.Errorf("failed to read hashes from old freezer: %w", err)
+			}
+			blockRange.headers, err = freezer.AncientRange(rawdb.ChainFreezerHeaderTable, start, count, 0)
+			if err != nil {
+				return fmt.Errorf("failed to read headers from old freezer: %w", err)
+			}
+			blockRange.bodies, err = freezer.AncientRange(rawdb.ChainFreezerBodiesTable, start, count, 0)
+			if err != nil {
+				return fmt.Errorf("failed to read bodies from old freezer: %w", err)
+			}
+			blockRange.receipts, err = freezer.AncientRange(rawdb.ChainFreezerReceiptTable, start, count, 0)
+			if err != nil {
+				return fmt.Errorf("failed to read receipts from old freezer: %w", err)
+			}
+			blockRange.tds, err = freezer.AncientRange(rawdb.ChainFreezerDifficultyTable, start, count, 0)
+			if err != nil {
+				return fmt.Errorf("failed to read tds from old freezer: %w", err)
+			}
+
+			out <- blockRange
+		}
+	}
+	return nil
+}
+
+func transformBlocks(ctx context.Context, in <-chan RLPBlockRange, out chan<- RLPBlockRange) error {
+	// Transform blocks from the in channel and send them to the out channel
+	defer close(out)
+	for blockRange := range in {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+			for i := range blockRange.hashes {
+				blockNumber := blockRange.start + uint64(i)
+
+				newHeader, err := transformHeader(blockRange.headers[i])
+				if err != nil {
+					return fmt.Errorf("can't transform header: %w", err)
+				}
+				newBody, err := transformBlockBody(blockRange.bodies[i])
+				if err != nil {
+					return fmt.Errorf("can't transform body: %w", err)
+				}
+
+				if yes, newHash := hasSameHash(newHeader, blockRange.hashes[i]); !yes {
+					log.Error("Hash mismatch", "block", blockNumber, "oldHash", common.BytesToHash(blockRange.hashes[i]), "newHash", newHash)
+					return fmt.Errorf("hash mismatch at block %d", blockNumber)
+				}
+
+				blockRange.headers[i] = newHeader
+				blockRange.bodies[i] = newBody
+			}
+			out <- blockRange
+		}
+	}
+	return nil
+}
+
+func writeAncientBlocks(ctx context.Context, freezer *rawdb.Freezer, in <-chan RLPBlockRange) error {
+	// Write blocks from the in channel to the newDb
+	for blockRange := range in {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+			_, err := freezer.ModifyAncients(func(aWriter ethdb.AncientWriteOp) error {
+				for i := range blockRange.hashes {
+					blockNumber := blockRange.start + uint64(i)
+					if err := aWriter.AppendRaw(rawdb.ChainFreezerHashTable, blockNumber, blockRange.hashes[i]); err != nil {
+						return fmt.Errorf("can't write hash to Freezer: %w", err)
+					}
+					if err := aWriter.AppendRaw(rawdb.ChainFreezerHeaderTable, blockNumber, blockRange.headers[i]); err != nil {
+						return fmt.Errorf("can't write header to Freezer: %w", err)
+					}
+					if err := aWriter.AppendRaw(rawdb.ChainFreezerBodiesTable, blockNumber, blockRange.bodies[i]); err != nil {
+						return fmt.Errorf("can't write body to Freezer: %w", err)
+					}
+					if err := aWriter.AppendRaw(rawdb.ChainFreezerReceiptTable, blockNumber, blockRange.receipts[i]); err != nil {
+						return fmt.Errorf("can't write receipts to Freezer: %w", err)
+					}
+					if err := aWriter.AppendRaw(rawdb.ChainFreezerDifficultyTable, blockNumber, blockRange.tds[i]); err != nil {
+						return fmt.Errorf("can't write td to Freezer: %w", err)
+					}
+				}
+				return nil
+			})
+			if err != nil {
+				return fmt.Errorf("failed to write block range: %w", err)
+			}
+			log.Info("Wrote ancient blocks", "start", blockRange.start, "end", blockRange.start+uint64(len(blockRange.hashes)-1), "count", len(blockRange.hashes))
+		}
+	}
+	return nil
+}
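The ancients migration above streams batches through a three-stage read → transform → write pipeline wired together with `errgroup` and buffered channels. A minimal, self-contained sketch of that pattern, with the freezer I/O replaced by placeholder stages (the `batch` type and stage bodies are illustrative, not part of this change):

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// batch stands in for RLPBlockRange; the real pipeline carries RLP-encoded blocks.
type batch struct{ start uint64 }

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	readCh := make(chan batch, 10)      // same buffering as readChan in ancients.go
	transformCh := make(chan batch, 10) // same buffering as transformChan

	// Stage 1: produce batches; closing the channel signals completion downstream.
	g.Go(func() error {
		defer close(readCh)
		for i := uint64(0); i < 100; i += 10 {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case readCh <- batch{start: i}:
			}
		}
		return nil
	})

	// Stage 2: transform each batch and forward it.
	g.Go(func() error {
		defer close(transformCh)
		for b := range readCh {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case transformCh <- b: // a real stage would rewrite headers/bodies here
			}
		}
		return nil
	})

	// Stage 3: consume; an error returned here would cancel ctx and unwind stages 1-2.
	g.Go(func() error {
		for b := range transformCh {
			_ = b // a real stage would call freezer.ModifyAncients
		}
		return nil
	})

	if err := g.Wait(); err != nil {
		fmt.Println("pipeline failed:", err)
	}
}
```

Because all three goroutines share the `errgroup` context, a failure in any stage cancels the others and `g.Wait` returns the first error, which is how `migrateAncientsDb` aborts cleanly on a bad block.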
fmt.Errorf("can't write receipts to Freezer: %w", err) + } + if err := aWriter.AppendRaw(rawdb.ChainFreezerDifficultyTable, blockNumber, blockRange.tds[i]); err != nil { + return fmt.Errorf("can't write td to Freezer: %w", err) + } + } + return nil + }) + if err != nil { + return fmt.Errorf("failed to write block range: %w", err) + } + log.Info("Wrote ancient blocks", "start", blockRange.start, "end", blockRange.start+uint64(len(blockRange.hashes)-1), "count", len(blockRange.hashes)) + } + } + return nil +} diff --git a/op-chain-ops/cmd/celo-dbmigrate/db.go b/op-chain-ops/cmd/celo-dbmigrate/db.go new file mode 100644 index 0000000000000..37675c07ca232 --- /dev/null +++ b/op-chain-ops/cmd/celo-dbmigrate/db.go @@ -0,0 +1,52 @@ +package main + +import ( + "encoding/binary" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb" +) + +const ( + DB_CACHE = 1024 // size of the cache in MB + DB_HANDLES = 60 // number of handles + LAST_MIGRATED_BLOCK_KEY = "celoLastMigratedBlock" +) + +var ( + headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header +) + +// encodeBlockNumber encodes a block number as big endian uint64 +func encodeBlockNumber(number uint64) []byte { + enc := make([]byte, 8) + binary.BigEndian.PutUint64(enc, number) + return enc +} + +// headerKey = headerPrefix + num (uint64 big endian) + hash +func headerKey(number uint64, hash common.Hash) []byte { + return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...) +} + +// readLastMigratedBlock returns the last migration number. +func readLastMigratedBlock(db ethdb.KeyValueReader) uint64 { + data, err := db.Get([]byte(LAST_MIGRATED_BLOCK_KEY)) + if err != nil { + return 0 + } + number := binary.BigEndian.Uint64(data) + return number +} + +// writeLastMigratedBlock stores the last migration number. +func writeLastMigratedBlock(db ethdb.KeyValueWriter, number uint64) error { + enc := make([]byte, 8) + binary.BigEndian.PutUint64(enc, number) + return db.Put([]byte(LAST_MIGRATED_BLOCK_KEY), enc) +} + +// deleteLastMigratedBlock removes the last migration number. +func deleteLastMigratedBlock(db ethdb.KeyValueWriter) error { + return db.Delete([]byte(LAST_MIGRATED_BLOCK_KEY)) +} diff --git a/op-chain-ops/cmd/celo-dbmigrate/main.go b/op-chain-ops/cmd/celo-dbmigrate/main.go new file mode 100644 index 0000000000000..4aada61af4a57 --- /dev/null +++ b/op-chain-ops/cmd/celo-dbmigrate/main.go @@ -0,0 +1,190 @@ +package main + +import ( + "flag" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime/debug" + + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/log" + "github.com/mattn/go-isatty" + "golang.org/x/exp/slog" +) + +// How to run: +// go run ./op-chain-ops/cmd/celo-dbmigrate -oldDB /path/to/oldDB -newDB /path/to/newDB [-batchSize 1000] [-verbosity 3] [-memoryLimit 7500] [-clear-all] [-clear-nonAncients] +// +// This script will migrate block data from the old database to the new database +// You can set the log level using the -verbosity flag +// The number of ancient records to migrate in one batch can be set using the -batchSize flag +// The default batch size is 1000 +// You can set a memory limit in MB using the -memoryLimit flag. Defaults to 7500 MB. Make sure to set a limit that is less than your machine's available memory. 
diff --git a/op-chain-ops/cmd/celo-dbmigrate/main.go b/op-chain-ops/cmd/celo-dbmigrate/main.go
new file mode 100644
index 0000000000000..4aada61af4a57
--- /dev/null
+++ b/op-chain-ops/cmd/celo-dbmigrate/main.go
@@ -0,0 +1,190 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime/debug"
+
+	oplog "github.com/ethereum-optimism/optimism/op-service/log"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/mattn/go-isatty"
+	"golang.org/x/exp/slog"
+)
+
+// How to run:
+// go run ./op-chain-ops/cmd/celo-dbmigrate -oldDB /path/to/oldDB -newDB /path/to/newDB [-batchSize 1000] [-memoryLimit 7500] [-clear-all] [-clear-nonAncients]
+//
+// This script migrates block data from the old database to the new database.
+// The log level is currently fixed at debug.
+// The number of ancient records to migrate in one batch can be set using the -batchSize flag.
+// The default batch size is 10000.
+// You can set a memory limit in MB using the -memoryLimit flag. Defaults to 7500 MB. Make sure to set a limit that is less than your machine's available memory.
+// Use -clear-all to start with a fresh new database.
+// Use -clear-nonAncients to keep migrated ancients, but not non-ancients.
+
+func main() {
+	oldDBPath := flag.String("oldDB", "", "Path to the old database chaindata directory (read-only)")
+	newDBPath := flag.String("newDB", "", "Path to the new database")
+	batchSize := flag.Uint64("batchSize", 10000, "Number of records to migrate in one batch")
+	memoryLimit := flag.Int64("memoryLimit", 7500, "Memory limit in MB")
+
+	clearAll := flag.Bool("clear-all", false, "Use this to start with a fresh new database")
+	clearNonAncients := flag.Bool("clear-nonAncients", false, "Use to keep migrated ancients, but not non-ancients")
+	flag.Parse()
+
+	color := isatty.IsTerminal(os.Stderr.Fd())
+	handler := log.NewTerminalHandlerWithLevel(os.Stderr, slog.LevelDebug, color)
+	oplog.SetGlobalLogHandler(handler)
+
+	debug.SetMemoryLimit(*memoryLimit * 1 << 20) // Set memory limit, converting from MB to bytes
+
+	var err error
+
+	// check that the `rsync` command is available
+	if _, err := exec.LookPath("rsync"); err != nil {
+		log.Info("Please install `rsync` to use this script")
+		return
+	}
+
+	if *oldDBPath == "" || *newDBPath == "" {
+		log.Info("Please provide both oldDB and newDB flags")
+		flag.Usage()
+		return
+	}
+
+	if *clearAll {
+		if err := os.RemoveAll(*newDBPath); err != nil {
+			log.Crit("Failed to remove new database", "err", err)
+		}
+	}
+	if *clearNonAncients {
+		if err := cleanupNonAncientDb(*newDBPath); err != nil {
+			log.Crit("Failed to cleanup non-ancient database", "err", err)
+		}
+	}
+
+	if err := createEmptyNewDb(*newDBPath); err != nil {
+		log.Crit("Failed to create new database", "err", err)
+	}
+
+	var numAncientsNew uint64
+	if numAncientsNew, err = migrateAncientsDb(*oldDBPath, *newDBPath, *batchSize); err != nil {
+		log.Crit("Failed to migrate ancients database", "err", err)
+	}
+
+	var numNonAncients uint64
+	if numNonAncients, err = migrateNonAncientsDb(*oldDBPath, *newDBPath, numAncientsNew-1, *batchSize); err != nil {
+		log.Crit("Failed to migrate non-ancients database", "err", err)
+	}
+
+	log.Info("Migration Completed", "migratedAncients", numAncientsNew, "migratedNonAncients", numNonAncients)
+}
+
+func migrateNonAncientsDb(oldDbPath, newDbPath string, lastAncientBlock, batchSize uint64) (uint64, error) {
+	// First copy files from old database to new database
+	log.Info("Copy files from old database", "process", "db migration")
+	cmd := exec.Command("rsync", "-v", "-a", "--exclude=ancient", oldDbPath+"/", newDbPath)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		return 0, fmt.Errorf("failed to copy old database to new database: %w", err)
+	}
+
+	// Open the new database without access to AncientsDb
+	newDB, err := rawdb.NewLevelDBDatabase(newDbPath, DB_CACHE, DB_HANDLES, "", false)
+	if err != nil {
+		return 0, fmt.Errorf("failed to open new database: %w", err)
+	}
+	defer newDB.Close()
+
+	// get the last block number
+	hash := rawdb.ReadHeadHeaderHash(newDB)
+	lastBlock := *rawdb.ReadHeaderNumber(newDB, hash)
+	lastMigratedBlock := readLastMigratedBlock(newDB)
+
+	// if migration was interrupted, start from the last migrated block
+	fromBlock := max(lastAncientBlock, lastMigratedBlock) + 1
+
+	log.Info("Migration started", "process", "db migration", "startBlock", fromBlock, "endBlock", lastBlock, "count", lastBlock-fromBlock+1)
+
+	for i := fromBlock; i <= lastBlock; i += batchSize {
+		numbersHash := rawdb.ReadAllHashesInRange(newDB, i, i+batchSize-1)
+
+		log.Info("Processing Range", "process", "db migration", "from", i, "to(inclusive)", i+batchSize-1, "count", len(numbersHash))
+		for _, numberHash := range numbersHash {
+			// read header and body
+			header := rawdb.ReadHeaderRLP(newDB, numberHash.Hash, numberHash.Number)
+			body := rawdb.ReadBodyRLP(newDB, numberHash.Hash, numberHash.Number)
+
+			// transform header and body
+			newHeader, err := transformHeader(header)
+			if err != nil {
+				return 0, fmt.Errorf("failed to transform header: block %d - %x: %w", numberHash.Number, numberHash.Hash, err)
+			}
+			newBody, err := transformBlockBody(body)
+			if err != nil {
+				return 0, fmt.Errorf("failed to transform body: block %d - %x: %w", numberHash.Number, numberHash.Hash, err)
+			}
+
+			if yes, newHash := hasSameHash(newHeader, numberHash.Hash[:]); !yes {
+				log.Error("Hash mismatch", "block", numberHash.Number, "oldHash", numberHash.Hash, "newHash", newHash)
+				return 0, fmt.Errorf("hash mismatch at block %d - %x", numberHash.Number, numberHash.Hash)
+			}
+
+			// write header and body
+			batch := newDB.NewBatch()
+			rawdb.WriteBodyRLP(batch, numberHash.Hash, numberHash.Number, newBody)
+			_ = batch.Put(headerKey(numberHash.Number, numberHash.Hash), newHeader)
+			_ = writeLastMigratedBlock(batch, numberHash.Number)
+			if err := batch.Write(); err != nil {
+				return 0, fmt.Errorf("failed to write header and body: block %d - %x: %w", numberHash.Number, numberHash.Hash, err)
+			}
+		}
+	}
+
+	toBeRemoved := rawdb.ReadAllHashesInRange(newDB, 1, lastAncientBlock)
+	log.Info("Removing frozen blocks", "process", "db migration", "count", len(toBeRemoved))
+	batch := newDB.NewBatch()
+	for _, numberHash := range toBeRemoved {
+		rawdb.DeleteBlockWithoutNumber(batch, numberHash.Hash, numberHash.Number)
+		rawdb.DeleteCanonicalHash(batch, numberHash.Number)
+	}
+	if err := batch.Write(); err != nil {
+		return 0, fmt.Errorf("failed to delete frozen blocks: %w", err)
+	}
+
+	// if migration finished, remove the last migration number
+	if err := deleteLastMigratedBlock(newDB); err != nil {
+		return 0, fmt.Errorf("failed to delete last migration number: %w", err)
+	}
+	log.Info("Migration ended", "process", "db migration", "migratedBlocks", lastBlock-fromBlock+1, "removedBlocks", len(toBeRemoved))
+
+	return lastBlock - fromBlock + 1, nil
+}
+
+func createEmptyNewDb(newDBPath string) error {
+	if err := os.MkdirAll(newDBPath, 0755); err != nil {
+		return fmt.Errorf("failed to create new database directory: %w", err)
+	}
+	return nil
+}
+
+func cleanupNonAncientDb(dir string) error {
+	files, err := os.ReadDir(dir)
+	if err != nil {
+		return fmt.Errorf("failed to read directory: %w", err)
+	}
+	for _, file := range files {
+		if file.Name() != "ancient" {
+			err := os.RemoveAll(filepath.Join(dir, file.Name()))
+			if err != nil {
+				return fmt.Errorf("failed to remove file: %w", err)
+			}
+		}
+	}
+	return nil
+}
diff --git a/op-chain-ops/cmd/celo-dbmigrate/transform.go b/op-chain-ops/cmd/celo-dbmigrate/transform.go
new file mode 100644
index 0000000000000..650c64b72230a
--- /dev/null
+++ b/op-chain-ops/cmd/celo-dbmigrate/transform.go
@@ -0,0 +1,104 @@
+package main
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+var (
+	IstanbulExtraVanity = 32 // Fixed number of extra-data bytes reserved for validator vanity
+)
+
+// IstanbulAggregatedSeal is the aggregated seal for Istanbul blocks
+type IstanbulAggregatedSeal struct {
+	// Bitmap is a bitmap having an active bit for each validator that signed this block
+	Bitmap *big.Int
+	// Signature is an aggregated BLS signature resulting from signatures by each validator that signed this block
+	Signature []byte
+	// Round is the round in which the signature was created.
+	Round *big.Int
+}
+
+// IstanbulExtra is the extra-data for Istanbul blocks
+type IstanbulExtra struct {
+	// AddedValidators are the validators that have been added in the block
+	AddedValidators []common.Address
+	// AddedValidatorsPublicKeys are the BLS public keys for the validators added in the block
+	AddedValidatorsPublicKeys [][96]byte
+	// RemovedValidators is a bitmap having an active bit for each removed validator in the block
+	RemovedValidators *big.Int
+	// Seal is an ECDSA signature by the proposer
+	Seal []byte
+	// AggregatedSeal contains the aggregated BLS signature created via IBFT consensus.
+	AggregatedSeal IstanbulAggregatedSeal
+	// ParentAggregatedSeal contains an aggregated BLS signature for the previous block.
+	ParentAggregatedSeal IstanbulAggregatedSeal
+}
+
+// transformHeader removes the aggregated seal from the header
+func transformHeader(header []byte) ([]byte, error) {
+	newHeader := new(types.Header)
+	err := rlp.DecodeBytes(header, &newHeader)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(newHeader.Extra) < IstanbulExtraVanity {
+		return nil, errors.New("invalid istanbul header extra-data")
+	}
+
+	istanbulExtra := IstanbulExtra{}
+	err = rlp.DecodeBytes(newHeader.Extra[IstanbulExtraVanity:], &istanbulExtra)
+	if err != nil {
+		return nil, err
+	}
+
+	istanbulExtra.AggregatedSeal = IstanbulAggregatedSeal{}
+
+	payload, err := rlp.EncodeToBytes(&istanbulExtra)
+	if err != nil {
+		return nil, err
+	}
+
+	newHeader.Extra = append(newHeader.Extra[:IstanbulExtraVanity], payload...)
+
+	return rlp.EncodeToBytes(newHeader)
+}
+
+func hasSameHash(newHeader, oldHash []byte) (bool, common.Hash) {
+	newHash := crypto.Keccak256Hash(newHeader)
+	return bytes.Equal(oldHash, newHash.Bytes()), newHash
+}
+
+// transformBlockBody migrates the block body from the old format to the new format (works with []byte input and output)
+func transformBlockBody(oldBodyData []byte) ([]byte, error) {
+	// decode body into celo-blockchain Body structure
+	// remove epochSnarkData and randomness data
+	var celoBody struct {
+		Transactions   types.Transactions
+		Randomness     rlp.RawValue
+		EpochSnarkData rlp.RawValue
+	}
+	if err := rlp.DecodeBytes(oldBodyData, &celoBody); err != nil {
+		return nil, fmt.Errorf("failed to RLP decode body: %w", err)
+	}
+
+	// transform into op-geth types.Body structure
+	newBody := types.Body{
+		Transactions: celoBody.Transactions,
+		Uncles:       []*types.Header{},
+	}
+	newBodyData, err := rlp.EncodeToBytes(newBody)
+	if err != nil {
+		return nil, fmt.Errorf("failed to RLP encode body: %w", err)
+	}
+
+	return newBodyData, nil
+}
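`transformHeader` works because a Celo block hash is computed over the header with the current block's aggregated seal zeroed out, which is why `hasSameHash` still matches after the seal is stripped and the header re-encoded. The seal-stripping round trip in isolation, on a reduced struct (field values are made up; the real `IstanbulExtra` also carries the validator-set fields shown above):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/rlp"
)

// Reduced mirror of IstanbulExtra/IstanbulAggregatedSeal; validator fields omitted.
type demoAggregatedSeal struct {
	Bitmap    *big.Int
	Signature []byte
	Round     *big.Int
}

type demoIstanbulExtra struct {
	Seal                 []byte
	AggregatedSeal       demoAggregatedSeal
	ParentAggregatedSeal demoAggregatedSeal
}

func main() {
	extra := demoIstanbulExtra{
		Seal:                 []byte{0x01, 0x02},
		AggregatedSeal:       demoAggregatedSeal{Bitmap: big.NewInt(0b1011), Signature: []byte{0xaa}, Round: big.NewInt(2)},
		ParentAggregatedSeal: demoAggregatedSeal{Bitmap: big.NewInt(0b0111), Signature: []byte{0xbb}, Round: big.NewInt(1)},
	}

	// Zero the current block's aggregated seal, exactly as transformHeader does.
	// nil *big.Int and nil []byte fields RLP-encode as empty values, so this stays valid RLP.
	extra.AggregatedSeal = demoAggregatedSeal{}

	payload, err := rlp.EncodeToBytes(&extra)
	if err != nil {
		panic(err)
	}

	var back demoIstanbulExtra
	if err := rlp.DecodeBytes(payload, &back); err != nil {
		panic(err)
	}
	// The parent seal survives; the current seal is now empty.
	fmt.Println(back.AggregatedSeal.Signature, back.ParentAggregatedSeal.Round) // [] 1
}
```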
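The body migration is the same idea: decode the three-field Celo body, keep only the transactions, and re-encode as an op-geth `types.Body`. A self-contained round trip showing the dropped fields (the raw values are placeholder RLP payloads, not real chain data):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// Build an "old" Celo body: [txs, randomness, epochSnarkData].
	oldBody := struct {
		Transactions   types.Transactions
		Randomness     rlp.RawValue
		EpochSnarkData rlp.RawValue
	}{
		Transactions:   types.Transactions{},
		Randomness:     rlp.RawValue{0x80}, // placeholder payloads
		EpochSnarkData: rlp.RawValue{0x80},
	}
	oldData, err := rlp.EncodeToBytes(&oldBody)
	if err != nil {
		panic(err)
	}

	// Re-encode as an op-geth body: [txs, uncles]; the Celo-only fields are gone.
	newData, err := rlp.EncodeToBytes(&types.Body{
		Transactions: oldBody.Transactions,
		Uncles:       []*types.Header{},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("old body %d bytes -> new body %d bytes\n", len(oldData), len(newData))
}
```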