Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions cmd/utils/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -438,10 +438,11 @@ var (
Usage: "Do not maintain log search index",
Category: flags.StateCategory,
}
// Deprecated Jan 2025
LogExportCheckpointsFlag = &cli.StringFlag{
Name: "history.logs.export",
Usage: "Export checkpoints to file in go source file format",
Category: flags.StateCategory,
Usage: "Deprecated, checkpoint file is auto-enabled at datadir/geth/filtermap_checkpoints.json",
Category: flags.DeprecatedCategory,
Value: "",
}
// Beacon client light sync settings
Expand Down Expand Up @@ -2219,6 +2220,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
}
if ctx.IsSet(LogExportCheckpointsFlag.Name) {
cfg.LogExportCheckpoints = ctx.String(LogExportCheckpointsFlag.Name)
log.Warn("Flag --history.logs.export is deprecated, checkpoint file is auto-enabled at datadir/geth/filtermap_checkpoints.json")
}
if ctx.String(GCModeFlag.Name) == "archive" && cfg.BlockHistory != 0 {
cfg.BlockHistory = 0
Expand Down
1 change: 1 addition & 0 deletions cmd/utils/flags_legacy.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ var DeprecatedFlags = []cli.Flag{
EnablePersonal,
PruneAncientDataFlag,
JournalFileFlag,
LogExportCheckpointsFlag,
}

var (
Expand Down
52 changes: 42 additions & 10 deletions core/filtermaps/filtermaps.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
package filtermaps

import (
"encoding/json"
"errors"
"fmt"
"os"
Expand Down Expand Up @@ -76,7 +77,7 @@ type FilterMaps struct {
closeWg sync.WaitGroup
history uint64
hashScheme bool // use hashdb-safe delete range method
exportFileName string
checkpointFile string
Params

db ethdb.KeyValueStore
Expand Down Expand Up @@ -216,9 +217,10 @@ type Config struct {
History uint64 // number of historical blocks to index
Disabled bool // disables indexing completely

// This option enables the checkpoint JSON file generator.
// If set, the given file will be updated with checkpoint information.
ExportFileName string
// CheckpointFileName specifies the path to the checkpoint JSON file.
// If set, checkpoints will be loaded from this file during initialization,
// and the file will be updated with new checkpoint information during operation.
CheckpointFileName string

// expect trie nodes of hash based state scheme in the filtermaps key range;
// use safe iterator based implementation of DeleteRange that skips them
Expand All @@ -245,7 +247,7 @@ func NewFilterMaps(db ethdb.KeyValueStore, initView *ChainView, historyCutoff, f
disabled: config.Disabled,
hashScheme: config.HashScheme,
disabledCh: make(chan struct{}),
exportFileName: config.ExportFileName,
checkpointFile: config.CheckpointFileName,
Params: params,
targetView: initView,
indexedView: initView,
Expand Down Expand Up @@ -370,6 +372,17 @@ func (f *FilterMaps) isShuttingDown() bool {
}
}

// loadCustomCheckpoints parses a checkpoint list from JSON-encoded data.
// It returns nil (and logs a warning) if the data cannot be unmarshalled.
func loadCustomCheckpoints(data []byte) checkpointList {
	var result checkpointList
	if err := json.Unmarshal(data, &result); err != nil {
		log.Warn("Failed to parse custom checkpoint file", "error", err)
		return nil
	}
	return result
}

// init initializes an empty log index according to the current targetView.
func (f *FilterMaps) init() error {
// ensure that there is no remaining data in the filter maps key range
Expand All @@ -380,19 +393,38 @@ func (f *FilterMaps) init() error {
f.indexLock.Lock()
defer f.indexLock.Unlock()

// Load checkpoints from custom file if specified
if f.checkpointFile != "" {
if data, err := os.ReadFile(f.checkpointFile); err == nil {
if customCheckpoints := loadCustomCheckpoints(data); len(customCheckpoints) > 0 {
checkpoints = append(checkpoints, customCheckpoints)
log.Info("Loaded custom checkpoints from file",
"path", f.checkpointFile,
"count", len(customCheckpoints))
}
}
}

var bestIdx, bestLen int
for idx, checkpointList := range checkpoints {
// binary search for the last matching epoch head
min, max := 0, len(checkpointList)
for min < max {
mid := (min + max + 1) / 2
cp := checkpointList[mid-1]
if cp.BlockNumber <= f.targetView.HeadNumber() && f.targetView.BlockId(cp.BlockNumber) == cp.BlockId {
if checkpointList[mid-1].BlockNumber <= f.targetView.HeadNumber() {
min = mid
} else {
max = mid - 1
}
}
if max == 0 {
continue
}
// verify the latest checkpoint within range
cp := checkpointList[max-1]
if f.targetView.BlockId(cp.BlockNumber) != cp.BlockId {
continue
}
if max > bestLen {
bestIdx, bestLen = idx, max
}
Expand Down Expand Up @@ -871,14 +903,14 @@ func (f *FilterMaps) exportCheckpoints() {
if epochCount == f.lastFinalEpoch {
return
}
w, err := os.Create(f.exportFileName)
w, err := os.Create(f.checkpointFile)
if err != nil {
log.Error("Error creating checkpoint export file", "name", f.exportFileName, "error", err)
log.Error("Error creating checkpoint export file", "name", f.checkpointFile, "error", err)
return
}
defer w.Close()

log.Info("Exporting log index checkpoints", "epochs", epochCount, "file", f.exportFileName)
log.Info("Exporting log index checkpoints", "epochs", epochCount, "file", f.checkpointFile)
w.WriteString("[\n")
comma := ","
for epoch := uint32(0); epoch < epochCount; epoch++ {
Expand Down
2 changes: 1 addition & 1 deletion core/filtermaps/indexer.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ func (f *FilterMaps) indexerLoop() {
}
} else {
if f.finalBlock != f.lastFinal {
if f.exportFileName != "" {
if f.checkpointFile != "" {
f.exportCheckpoints()
}
f.lastFinal = f.finalBlock
Expand Down
12 changes: 8 additions & 4 deletions eth/backend.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"fmt"
"math"
"math/big"
"path/filepath"
"runtime"
"sync"
"time"
Expand Down Expand Up @@ -404,11 +405,14 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}

// Initialize filtermaps log index.
// Auto-enable checkpoint file
checkpointFile := filepath.Join(stack.DataDir(), "geth", "filtermap_checkpoints.json")

fmConfig := filtermaps.Config{
History: config.LogHistory,
Disabled: config.LogNoHistory,
ExportFileName: config.LogExportCheckpoints,
HashScheme: config.StateScheme == rawdb.HashScheme,
History: config.LogHistory,
Disabled: config.LogNoHistory,
CheckpointFileName: checkpointFile,
HashScheme: config.StateScheme == rawdb.HashScheme,
}
chainView := eth.newChainView(eth.blockchain.CurrentBlock())
historyCutoff, _ := eth.blockchain.HistoryPruningCutoff()
Expand Down
11 changes: 6 additions & 5 deletions eth/ethconfig/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,11 +122,12 @@ type Config struct {
// Deprecated: use 'TransactionHistory' instead.
TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.

TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
BlockHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose block body/header/receipt/diff/hash are reserved.
LogHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head where a log search index is maintained.
LogNoHistory bool `toml:",omitempty"` // No log search index is maintained.
LogExportCheckpoints string // export log index checkpoints to file
TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved.
BlockHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose block body/header/receipt/diff/hash are reserved.
LogHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head where a log search index is maintained.
LogNoHistory bool `toml:",omitempty"` // No log search index is maintained.
// Deprecated: checkpoint file is auto-enabled at datadir/geth/filtermap_checkpoints.json.
LogExportCheckpoints string
StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved.

// State scheme represents the scheme used to store ethereum states and trie
Expand Down
6 changes: 3 additions & 3 deletions eth/filters/filter_system_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -183,9 +183,9 @@ func (b *testBackend) startFilterMaps(history uint64, disabled bool, params filt
head := b.CurrentBlock()
chainView := filtermaps.NewChainView(b, head.Number.Uint64(), head.Hash())
config := filtermaps.Config{
History: history,
Disabled: disabled,
ExportFileName: "",
History: history,
Disabled: disabled,
CheckpointFileName: "",
}
b.fm, _ = filtermaps.NewFilterMaps(b.db, chainView, 0, 0, params, config)
b.fm.Start()
Expand Down
Loading