core: adapt some cmd for multi-database
jingjunLi committed Mar 20, 2024
1 parent 3fc52fc commit 7e3e660
Showing 7 changed files with 150 additions and 30 deletions.
7 changes: 7 additions & 0 deletions cmd/geth/chaincmd.go
@@ -479,6 +479,13 @@ func dumpGenesis(ctx *cli.Context) error {
 			}
 			continue
 		}
+		// set the separate state & block database
+		if stack.CheckIfMultiDataBase() && err == nil {
+			stateDiskDb := utils.MakeStateDataBase(ctx, stack, true, false)
+			db.SetStateStore(stateDiskDb)
+			blockDb := utils.MakeBlockDatabase(ctx, stack, true, false)
+			db.SetBlockStore(blockDb)
+		}
 		genesis, err := core.ReadGenesis(db)
 		if err != nil {
 			utils.Fatalf("failed to read genesis: %s", err)
72 changes: 50 additions & 22 deletions cmd/geth/dbcmd.go
@@ -509,14 +509,19 @@ func checkStateContent(ctx *cli.Context) error {
 	db := utils.MakeChainDatabase(ctx, stack, true, false)
 	defer db.Close()
 	var (
-		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
+		it        ethdb.Iterator
 		hasher    = crypto.NewKeccakState()
 		got       = make([]byte, 32)
 		errs      int
 		count     int
 		startTime = time.Now()
 		lastLog   = time.Now()
 	)
+	if stack.CheckIfMultiDataBase() {
+		it = rawdb.NewKeyLengthIterator(db.StateStore().NewIterator(prefix, start), 32)
+	} else {
+		it = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
+	}
 	for it.Next() {
 		count++
 		k := it.Key()
@@ -634,22 +639,20 @@ func dbGet(ctx *cli.Context) error {
 		log.Info("Could not decode the key", "error", err)
 		return err
 	}
-
-	data, err := db.Get(key)
-	if err != nil {
-		if stack.CheckIfMultiDataBase() {
-			// if chainDb doesn't have the key, try to get it from the multi-database
-			stateData, dberr := db.StateStore().Get(key)
-			if dberr == nil {
-				fmt.Printf("key %#x: %#x\n", key, stateData)
-				return nil
-			}
-			if blockData, dberr := db.BlockStore().Get(key); dberr != nil {
-				fmt.Printf("key %#x: %#x\n", key, blockData)
-				return nil
-			}
-		}
+	opDb := db
+	if stack.CheckIfMultiDataBase() {
+		keyType := rawdb.DataTypeByKey(key)
+		if keyType == rawdb.StateDataType {
+			opDb = db.StateStore()
+		} else if keyType == rawdb.BlockDataType {
+			opDb = db.BlockStore()
+		} else {
+			opDb = db
+		}
+	}
+
+	data, err := opDb.Get(key)
+	if err != nil {
 		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
 		return err
 	}
@@ -812,11 +815,23 @@ func dbDelete(ctx *cli.Context) error {
 		log.Info("Could not decode the key", "error", err)
 		return err
 	}
-	data, err := db.Get(key)
+	opDb := db
+	if stack.CheckIfMultiDataBase() {
+		keyType := rawdb.DataTypeByKey(key)
+		if keyType == rawdb.StateDataType {
+			opDb = db.StateStore()
+		} else if keyType == rawdb.BlockDataType {
+			opDb = db.BlockStore()
+		} else {
+			opDb = db
+		}
+	}
+
+	data, err := opDb.Get(key)
 	if err == nil {
 		fmt.Printf("Previous value: %#x\n", data)
 	}
-	if err = db.Delete(key); err != nil {
+	if err = opDb.Delete(key); err != nil {
 		log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
 		return err
 	}
@@ -850,11 +865,24 @@ func dbPut(ctx *cli.Context) error {
 		log.Info("Could not decode the value", "error", err)
 		return err
 	}
-	data, err = db.Get(key)
+
+	opDb := db
+	if stack.CheckIfMultiDataBase() {
+		keyType := rawdb.DataTypeByKey(key)
+		if keyType == rawdb.StateDataType {
+			opDb = db.StateStore()
+		} else if keyType == rawdb.BlockDataType {
+			opDb = db.BlockStore()
+		} else {
+			opDb = db
+		}
+	}
+
+	data, err = opDb.Get(key)
 	if err == nil {
 		fmt.Printf("Previous value: %#x\n", data)
 	}
-	return db.Put(key, value)
+	return opDb.Put(key, value)
 }
 
 // dbDumpTrie shows the key-value slots of a given storage trie
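Note: the same opDb selection block now appears verbatim in dbGet, dbDelete, and dbPut. A minimal refactor sketch, using only what the diff already relies on (stack.CheckIfMultiDataBase, db.StateStore, db.BlockStore, rawdb.DataTypeByKey, and the imports already present in dbcmd.go); resolveKeyDb is a hypothetical helper, not part of this commit:

// resolveKeyDb (hypothetical) picks the store a key belongs to when the node
// runs with separate state/block databases; otherwise it returns the chain db.
func resolveKeyDb(stack *node.Node, db ethdb.Database, key []byte) ethdb.Database {
	if !stack.CheckIfMultiDataBase() {
		return db
	}
	switch rawdb.DataTypeByKey(key) {
	case rawdb.StateDataType:
		return db.StateStore()
	case rawdb.BlockDataType:
		return db.BlockStore()
	default:
		return db // chain data stays in the default key-value store
	}
}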
@@ -945,7 +973,7 @@ func freezerInspect(ctx *cli.Context) error {
 	stack, _ := makeConfigNode(ctx)
 	ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
 	stack.Close()
-	return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
+	return rawdb.InspectFreezerTable(ancient, freezer, table, start, end, stack.CheckIfMultiDataBase())
 }
 
 func importLDBdata(ctx *cli.Context) error {
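Note: freezerInspect drives rawdb.InspectFreezerTable from the geth db command line. A usage sketch of the new signature (the ancient path is illustrative; with multiDatabase=true the chain freezer is resolved under <chaindata>/block/ancient instead of the legacy location):

// Dump the first ten index entries of the chain freezer's "headers" table.
err := rawdb.InspectFreezerTable(
	"/data/geth/chaindata/ancient", // root ancient dir (illustrative)
	"chain",   // freezer name (ChainFreezerName)
	"headers", // table name
	0, 10,     // start, end of the index range to dump
	true,      // multiDatabase: re-root under ../block/ancient
)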
@@ -1085,11 +1113,11 @@ func showMetaData(ctx *cli.Context) error {
 	defer stack.Close()
 	db := utils.MakeChainDatabase(ctx, stack, true, false)
 	defer db.Close()
-	ancients, err := db.Ancients()
+	ancients, err := db.BlockStore().Ancients()
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
 	}
-	data := rawdb.ReadChainMetadata(db)
+	data := rawdb.ReadChainMetadataCmd(db)
 	data = append(data, []string{"frozen", fmt.Sprintf("%d items", ancients)})
 	data = append(data, []string{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))})
 	if b := rawdb.ReadHeadBlock(db); b != nil {
12 changes: 11 additions & 1 deletion cmd/geth/snapshot.go
@@ -355,7 +355,17 @@ func pruneBlock(ctx *cli.Context) error {
 	if !ctx.IsSet(utils.AncientFlag.Name) {
 		return errors.New("datadir.ancient must be set")
 	} else {
-		oldAncientPath = ctx.String(utils.AncientFlag.Name)
+		if stack.CheckIfMultiDataBase() {
+			ancientPath := ctx.String(utils.AncientFlag.Name)
+			index := strings.LastIndex(ancientPath, "/ancient/chain")
+			if index != -1 {
+				oldAncientPath = ancientPath[:index] + "/block/ancient/chain"
+			}
+			log.Info("Start to prune block data", "oldAncientPath", oldAncientPath, "index", index, "ancientPath", ancientPath)
+		} else {
+			oldAncientPath = ctx.String(utils.AncientFlag.Name)
+		}
+		log.Info("Start to prune block data", "oldAncientPath", oldAncientPath)
 		if !filepath.IsAbs(oldAncientPath) {
 			// force absolute paths, which often fail due to the splicing of relative paths
 			return errors.New("datadir.ancient not abs path")
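Note: the rewrite keeps everything before the trailing /ancient/chain segment and splices in /block/ancient/chain, which is where the block freezer lives in the multi-database layout. A worked example with an illustrative datadir:

ancientPath := "/data/geth/chaindata/ancient/chain"
index := strings.LastIndex(ancientPath, "/ancient/chain")
// index != -1, so:
oldAncientPath := ancientPath[:index] + "/block/ancient/chain"
// oldAncientPath == "/data/geth/chaindata/block/ancient/chain"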
2 changes: 1 addition & 1 deletion cmd/utils/flags.go
@@ -2336,7 +2336,7 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFree
 // MakeStateDataBase open a separate state database using the flags passed to the client and will hard crash if it fails.
 func MakeStateDataBase(ctx *cli.Context, stack *node.Node, readonly, disableFreeze bool) ethdb.Database {
 	cache := ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100
-	handles := MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name)) / 2
+	handles := MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name)) * 90 / 100
 	statediskdb, err := stack.OpenDatabaseWithFreezer("chaindata/state", cache, handles, "", "", readonly, disableFreeze, false, false)
 	if err != nil {
 		Fatalf("Failed to open separate trie database: %v", err)
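Note: the separate state database now reserves 90% of the file-descriptor allowance instead of half, presumably because most open table files belong to state data; e.g. with MakeDatabaseHandles returning 1000, the state store now receives 900 handles rather than 500.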
2 changes: 1 addition & 1 deletion core/blockchain.go
@@ -1596,11 +1596,11 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 	wg := sync.WaitGroup{}
 	wg.Add(1)
 	go func() {
-		rawdb.WritePreimages(bc.db, state.Preimages())
 		blockBatch := bc.db.BlockStore().NewBatch()
 		rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
 		rawdb.WriteBlock(blockBatch, block)
 		rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
+		rawdb.WritePreimages(blockBatch, state.Preimages())
 		if err := blockBatch.Write(); err != nil {
 			log.Crit("Failed to write block into disk", "err", err)
 		}
15 changes: 12 additions & 3 deletions core/rawdb/ancient_utils.go
@@ -121,16 +121,25 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
 // ancient indicates the path of root ancient directory where the chain freezer can
 // be opened. Start and end specify the range for dumping out indexes.
 // Note this function can only be used for debugging purposes.
-func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64) error {
+func InspectFreezerTable(ancient string, freezerName string, tableName string, start, end int64, multiDatabase bool) error {
 	var (
 		path   string
 		tables map[string]bool
 	)
 	switch freezerName {
 	case ChainFreezerName:
-		path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
+		if multiDatabase {
+			path, tables = resolveChainFreezerDir(filepath.Dir(ancient)+"/block/ancient"), chainFreezerNoSnappy
+		} else {
+			path, tables = resolveChainFreezerDir(ancient), chainFreezerNoSnappy
+		}
+
 	case StateFreezerName:
-		path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
+		if multiDatabase {
+			path, tables = filepath.Join(filepath.Dir(ancient)+"/state/ancient", freezerName), stateFreezerNoSnappy
+		} else {
+			path, tables = filepath.Join(ancient, freezerName), stateFreezerNoSnappy
+		}
 	default:
 		return fmt.Errorf("unknown freezer, supported ones: %v", freezers)
 	}
70 changes: 68 additions & 2 deletions core/rawdb/database.go
@@ -650,7 +650,7 @@ func AncientInspect(db ethdb.Database) error {
 	offset := counter(ReadOffSetOfCurrentAncientFreezer(db))
 	// Get number of ancient rows inside the freezer.
 	ancients := counter(0)
-	if count, err := db.ItemAmountInAncient(); err != nil {
+	if count, err := db.BlockStore().ItemAmountInAncient(); err != nil {
 		log.Error("failed to get the items amount in ancientDB", "err", err)
 		return err
 	} else {
@@ -698,6 +698,48 @@ func PruneHashTrieNodeInDataBase(db ethdb.Database) error {
 	return nil
 }
 
+type DataType int
+
+const (
+	StateDataType DataType = iota
+	BlockDataType
+	ChainDataType
+	Unknown
+)
+
+func DataTypeByKey(key []byte) DataType {
+	switch {
+	// state
+	case IsLegacyTrieNode(key, key),
+		bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength,
+		IsAccountTrieNode(key),
+		IsStorageTrieNode(key):
+		return StateDataType
+
+	// block
+	case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength),
+		bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength),
+		bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength),
+		bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix),
+		bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix),
+		bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
+		return BlockDataType
+	default:
+		for _, meta := range [][]byte{
+			fastTrieProgressKey, persistentStateIDKey, trieJournalKey, snapSyncStatusFlagKey} {
+			if bytes.Equal(key, meta) {
+				return StateDataType
+			}
+		}
+		for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey} {
+			if bytes.Equal(key, meta) {
+				return BlockDataType
+			}
+		}
+		return ChainDataType
+	}
+}
+
 // InspectDatabase traverses the entire database and checks the size
 // of all different categories of data.
 func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
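Note: DataTypeByKey classifies raw keys against the standard key schema (prefixes from schema.go: headerPrefix "h", blockBodyPrefix "b", trieNodeAccountPrefix "A", and so on). A quick behaviour sketch, assuming those prefixes:

// headerPrefix ("h") + 8-byte block number + 32-byte hash is a header key,
// so it routes to the block store.
headerKey := append([]byte("h"), make([]byte, 8+32)...)
fmt.Println(rawdb.DataTypeByKey(headerKey) == rawdb.BlockDataType) // true

// A key matching neither the state nor the block schema falls through
// to ChainDataType and stays in the default chaindata store.
fmt.Println(rawdb.DataTypeByKey([]byte("l-unknown")) == rawdb.ChainDataType) // true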
@@ -874,12 +916,12 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 				logged = time.Now()
 			}
 		}
+		log.Info("Inspecting separate state database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
 	}
 	// inspect separate block db
 	if blockIter != nil {
 		count = 0
 		logged = time.Now()
-		log.Info("Inspecting separate state database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
 
 		for blockIter.Next() {
 			var (
@@ -921,6 +963,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 				logged = time.Now()
 			}
 		}
+		log.Info("Inspecting separate block database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
 	}
 	// Display the database statistic of key-value store.
 	stats := [][]string{
@@ -1030,3 +1073,26 @@ func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
 	}
 	return data
 }
+
+func ReadChainMetadataCmd(db ethdb.Database) [][]string {
+	pp := func(val *uint64) string {
+		if val == nil {
+			return "<nil>"
+		}
+		return fmt.Sprintf("%d (%#x)", *val, *val)
+	}
+	data := [][]string{
+		{"databaseVersion", pp(ReadDatabaseVersion(db))},
+		{"headBlockHash", fmt.Sprintf("%v", ReadHeadBlockHash(db.BlockStore()))},
+		{"headFastBlockHash", fmt.Sprintf("%v", ReadHeadFastBlockHash(db))},
+		{"headHeaderHash", fmt.Sprintf("%v", ReadHeadHeaderHash(db.BlockStore()))},
+		{"lastPivotNumber", pp(ReadLastPivotNumber(db))},
+		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(ReadSnapshotSyncStatus(db)))},
+		{"snapshotDisabled", fmt.Sprintf("%v", ReadSnapshotDisabled(db))},
+		{"snapshotJournal", fmt.Sprintf("%d bytes", len(ReadSnapshotJournal(db)))},
+		{"snapshotRecoveryNumber", pp(ReadSnapshotRecoveryNumber(db))},
+		{"snapshotRoot", fmt.Sprintf("%v", ReadSnapshotRoot(db))},
+		{"txIndexTail", pp(ReadTxIndexTail(db))},
+	}
+	return data
+}
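Note: ReadChainMetadataCmd mirrors ReadChainMetadata but takes a full ethdb.Database so the head block and head header hashes can be read from the block store. A usage sketch in the spirit of showMetaData above (db as returned by utils.MakeChainDatabase):

for _, row := range rawdb.ReadChainMetadataCmd(db) {
	fmt.Printf("%-24s %s\n", row[0], row[1])
}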
