cmd/geth/chaincmd.go (2 additions, 2 deletions)
@@ -363,7 +363,7 @@ func initGenesis(ctx *cli.Context) error {
log.Warn("Multi-database is an experimental feature")
}

-triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
+triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle(), false)
defer triedb.Close()

_, hash, compatErr, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
@@ -1091,7 +1091,7 @@ func dump(ctx *cli.Context) error {
return err
}
defer db.Close()
-triedb := utils.MakeTrieDatabase(ctx, stack, db, true, true, false) // always enable preimage lookup
+triedb := utils.MakeTrieDatabase(ctx, stack, db, true, true, false, false) // always enable preimage lookup
defer triedb.Close()

state, err := state.New(root, state.NewDatabase(triedb, nil))

cmd/geth/dbcmd.go (21 additions, 2 deletions)
@@ -93,6 +93,7 @@ Remove blockchain and state databases`,
dbTrieDeleteCmd,
dbDeleteTrieStateCmd,
ancientInspectCmd,
incrInspectCmd,
},
}
dbInspectCmd = &cli.Command{
@@ -275,6 +276,13 @@ of ancientStore, will also displays the reserved number of blocks in ancientStor
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: "This command queries the history of the account or storage slot within the specified block range",
}
incrInspectCmd = &cli.Command{
Action: inspectIncrSnapshot,
Name: "inspect-incr-snapshot",
Flags: []cli.Flag{utils.IncrSnapshotPathFlag},
Usage: "Inspect the incremental snapshot information",
Description: `This command reads and displays incremental store information`,
}
)

func removeDB(ctx *cli.Context) error {
@@ -945,7 +953,7 @@ func dbDumpTrie(ctx *cli.Context) error {

db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
-triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false)
+triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false, false)
defer triedb.Close()

var (
@@ -1279,7 +1287,7 @@ func inspectHistory(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()

-triedb := utils.MakeTrieDatabase(ctx, stack, db, false, false, false)
+triedb := utils.MakeTrieDatabase(ctx, stack, db, false, false, false, false)
defer triedb.Close()

var (
@@ -1327,3 +1335,14 @@ func inspectHistory(ctx *cli.Context) error {
}
return inspectStorage(triedb, start, end, address, slot, ctx.Bool("raw"))
}

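// inspectIncrSnapshot reads the incremental snapshot store under --incr.datadir and displays its information.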
func inspectIncrSnapshot(ctx *cli.Context) error {
if !ctx.IsSet(utils.IncrSnapshotPathFlag.Name) {
return errors.New("incremental snapshot path is not set")
}
baseDir := ctx.String(utils.IncrSnapshotPathFlag.Name)
return rawdb.InspectIncrStore(baseDir)
}
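A hedged usage sketch of the new subcommand (the data directory below is illustrative, not a default): it reads the incremental store at the given path and prints its information.

$ geth db inspect-incr-snapshot --incr.datadir /data/incr-snapshots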

cmd/geth/main.go (7 additions)
@@ -183,6 +183,13 @@ var (
utils.LogDebugFlag,
utils.LogBacktraceAtFlag,
utils.BlobExtraReserveFlag,
utils.EnableIncrSnapshotFlag,
utils.IncrSnapshotPathFlag,
utils.IncrSnapshotBlockIntervalFlag,
utils.IncrSnapshotStateBufferFlag,
utils.IncrSnapshotKeptBlocksFlag,
utils.UseRemoteIncrSnapshotFlag,
utils.RemoteIncrSnapshotURLFlag,
// utils.BeaconApiFlag,
// utils.BeaconApiHeaderFlag,
// utils.BeaconThresholdFlag,

cmd/geth/snapshot.go (82 additions, 5 deletions)
@@ -27,6 +27,7 @@ import (

"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/pruner"
@@ -163,6 +164,15 @@ The export-preimages command exports hash preimages to a flat file, in exactly
the expected order for the overlay tree migration.
`,
},
{
Action: mergeIncrSnapshot,
Name: "merge-incr-snapshot",
Usage: "Merge the incremental snapshot into local data",
ArgsUsage: "",
Flags: slices.Concat([]cli.Flag{utils.IncrSnapshotPathFlag},
utils.DatabaseFlags),
Description: `This command merges multiple incremental snapshots into local data`,
},
},
}
)
@@ -220,7 +230,7 @@ func verifyState(ctx *cli.Context) error {
log.Error("Failed to load head block")
return errors.New("no head block")
}
-triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
+triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

var (
@@ -285,7 +295,7 @@ func traverseState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()

-triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
+triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

headBlock := rawdb.ReadHeadBlock(chaindb)
@@ -394,7 +404,7 @@ func traverseRawState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()

-triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
+triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

headBlock := rawdb.ReadHeadBlock(chaindb)
@@ -562,7 +572,7 @@ func dumpState(ctx *cli.Context) error {
return err
}
defer db.Close()
-triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false)
+triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false, false)
defer triedb.Close()

snapConfig := snapshot.Config{
@@ -645,7 +655,7 @@ func snapshotExportPreimages(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()

-triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false)
+triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, false, true, false, false)
defer triedb.Close()

var root common.Hash
@@ -707,3 +717,70 @@ func checkAccount(ctx *cli.Context) error {
log.Info("Checked the snapshot journalled storage", "time", common.PrettyDuration(time.Since(start)))
return nil
}

// mergeIncrSnapshot merges the incremental snapshot into local data.
func mergeIncrSnapshot(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()

chainDB := utils.MakeChainDatabase(ctx, stack, false)
defer chainDB.Close()

trieDB := utils.MakeTrieDatabase(ctx, stack, chainDB, false, false, false, true)
defer trieDB.Close()

if !ctx.IsSet(utils.IncrSnapshotPathFlag.Name) {
return errors.New("incremental snapshot path is not set")
}
path := ctx.String(utils.IncrSnapshotPathFlag.Name)

startBlock, err := trieDB.GetStartBlock()
if err != nil {
log.Error("Failed to get start block", "error", err)
return err
}
dirs, err := rawdb.GetAllIncrDirs(path)
if err != nil {
log.Error("Failed to get all incremental directories", "err", err)
return err
}
if startBlock < dirs[0].StartBlockNum {
return fmt.Errorf("local start block %d is lower than the first incremental snapshot start block %d", startBlock, dirs[0].StartBlockNum)
}

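// Verify that consecutive incremental snapshot directories cover a contiguous block range with no gaps.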
for i := 1; i < len(dirs); i++ {
prevFile := dirs[i-1]
currFile := dirs[i]

expectedStartBlock := prevFile.EndBlockNum + 1
if currFile.StartBlockNum != expectedStartBlock {
return fmt.Errorf("file continuity broken: file %s ends at %d, but file %s starts at %d (expected %d)",
prevFile.Name, prevFile.EndBlockNum, currFile.Name, currFile.StartBlockNum, expectedStartBlock)
}
}

log.Info("Start merging incremental snapshots", "path", path, "count", len(dirs))
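// Merge every snapshot whose block range extends past the local start block; the last snapshot is skipped if its data is incomplete.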
for i, dir := range dirs {
if i == len(dirs)-1 {
complete, err := rawdb.CheckIncrSnapshotComplete(dir.Path)
if err != nil {
log.Error("Failed to check last incr snapshot complete", "err", err)
return err
}
if !complete {
log.Warn("Skipping last incremental snapshot because its data is incomplete")
continue
}
}

if dir.StartBlockNum >= startBlock && dir.EndBlockNum > startBlock {
if err = core.MergeIncrSnapshot(chainDB, trieDB, dir.Path); err != nil {
log.Error("Failed to merge incremental snapshot", "err", err)
return err
}
} else {
log.Info("Skip merging incremental snapshot", "dir", dir.Name)
}
}
return nil
}
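A hedged usage sketch of the new snapshot subcommand (the path is illustrative): it checks the stored snapshots for block-range continuity and merges those whose range extends past the local start block.

$ geth snapshot merge-incr-snapshot --incr.datadir /data/incr-snapshots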

cmd/utils/flags.go (80 additions, 1 deletion)
@@ -1274,6 +1274,50 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Value: fakebeacon.DefaultPort,
Category: flags.APICategory,
}

// incremental snapshot related flags
EnableIncrSnapshotFlag = &cli.BoolFlag{
Name: "incr.enable",
Usage: "Enable incremental snapshot generation",
Value: false,
Category: flags.StateCategory,
}
IncrSnapshotPathFlag = &flags.DirectoryFlag{
Name: "incr.datadir",
Usage: "Data directory for incremental snapshot data, holding both generated and downloaded snapshots",
Value: "",
Category: flags.StateCategory,
}
IncrSnapshotBlockIntervalFlag = &cli.Uint64Flag{
Name: "incr.block-interval",
Usage: "Number of blocks aggregated into each incremental snapshot",
Value: pathdb.DefaultBlockInterval,
Category: flags.StateCategory,
}
IncrSnapshotStateBufferFlag = &cli.Uint64Flag{
Name: "incr.state-buffer",
Usage: "Memory buffer for aggregating MPT trie nodes in the incremental state; a larger buffer yields a smaller incremental snapshot",
Value: pathdb.DefaultIncrStateBufferSize,
Category: flags.StateCategory,
}
IncrSnapshotKeptBlocksFlag = &cli.Uint64Flag{
Name: "incr.kept-blocks",
Usage: "Number of blocks kept in the incremental snapshot (minimum 1024)",
Value: pathdb.DefaultKeptBlocks,
Category: flags.StateCategory,
}
UseRemoteIncrSnapshotFlag = &cli.BoolFlag{
Name: "incr.use-remote",
Usage: "Download incremental snapshots and merge them into local data",
Value: false,
Category: flags.StateCategory,
}
RemoteIncrSnapshotURLFlag = &cli.StringFlag{
Name: "incr.remote-url",
Usage: "Remote URL from which incremental snapshots are downloaded",
Value: "",
Category: flags.StateCategory,
}
)

var (
@@ -2318,6 +2362,38 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.VMTraceJsonConfig = ctx.String(VMTraceJsonConfigFlag.Name)
}
}

// Configure downloading and merging of remote incremental snapshots
if ctx.IsSet(UseRemoteIncrSnapshotFlag.Name) {
cfg.UseRemoteIncrSnapshot = true
if !ctx.IsSet(RemoteIncrSnapshotURLFlag.Name) {
Fatalf("Must provide a remote incremental snapshot URL")
} else {
cfg.RemoteIncrSnapshotURL = ctx.String(RemoteIncrSnapshotURLFlag.Name)
}
if ctx.IsSet(IncrSnapshotPathFlag.Name) {
cfg.IncrSnapshotPath = ctx.String(IncrSnapshotPathFlag.Name)
} else {
Fatalf("Must provide a path to store downloaded incremental snapshots")
}
}

// Configure incremental snapshot generation
if ctx.IsSet(EnableIncrSnapshotFlag.Name) {
cfg.EnableIncrSnapshots = true
if ctx.IsSet(IncrSnapshotPathFlag.Name) {
cfg.IncrSnapshotPath = ctx.String(IncrSnapshotPathFlag.Name)
}
if ctx.IsSet(IncrSnapshotBlockIntervalFlag.Name) {
cfg.IncrSnapshotBlockInterval = ctx.Uint64(IncrSnapshotBlockIntervalFlag.Name)
}
if ctx.IsSet(IncrSnapshotStateBufferFlag.Name) {
cfg.IncrSnapshotStateBuffer = ctx.Uint64(IncrSnapshotStateBufferFlag.Name)
}
if ctx.IsSet(IncrSnapshotKeptBlocksFlag.Name) {
cfg.IncrSnapshotKeptBlocks = ctx.Uint64(IncrSnapshotKeptBlocksFlag.Name)
}
}
}

// SetDNSDiscoveryDefaults configures DNS discovery with the given URL if
@@ -2783,7 +2859,7 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
}

// MakeTrieDatabase constructs a trie database based on the configured scheme.
-func MakeTrieDatabase(ctx *cli.Context, stack *node.Node, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *triedb.Database {
+func MakeTrieDatabase(ctx *cli.Context, stack *node.Node, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool, mergeIncr bool) *triedb.Database {
config := &triedb.Config{
Preimages: preimage,
IsVerkle: isVerkle,
@@ -2803,6 +2879,9 @@ func MakeTrieDatabase(ctx *cli.Context, stack *node.Node, disk ethdb.Database, p
config.PathDB = pathdb.ReadOnly
} else {
config.PathDB = pathdb.Defaults
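// When an incremental snapshot merge is requested, open the path database with MergeIncr enabled.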
if mergeIncr {
config.PathDB.MergeIncr = true
}
}
config.PathDB.JournalFilePath = fmt.Sprintf("%s/%s", stack.ResolvePath("chaindata"), eth.JournalFileName)
return triedb.NewDatabase(disk, config)
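
Taken together, a hedged sketch of the two modes these flags enable (directories, URL, and values are illustrative, not defaults):

# Generate incremental snapshots while the node is running
$ geth --incr.enable --incr.datadir /data/incr-snapshots --incr.block-interval 10000 --incr.kept-blocks 4096

# Download incremental snapshots from a remote server and merge them into local data
$ geth --incr.use-remote --incr.remote-url https://snapshots.example.org/incr --incr.datadir /data/incr-snapshots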