diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index d6efceeaa2..fb840792e0 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -306,11 +306,6 @@ func initGenesis(ctx *cli.Context) error { utils.Fatalf("Failed to open separate trie database: %v", dbErr) } chaindb.SetStateStore(statediskdb) - blockdb, err := stack.OpenDatabaseWithFreezer(name+"/block", 0, 0, "", "", false, false) - if err != nil { - utils.Fatalf("Failed to open separate block database: %v", err) - } - chaindb.SetBlockStore(blockdb) log.Warn("Multi-database is an experimental feature") } @@ -698,8 +693,6 @@ func dumpGenesis(ctx *cli.Context) error { if stack.CheckIfMultiDataBase() && err == nil { stateDiskDb := utils.MakeStateDataBase(ctx, stack, true, false) db.SetStateStore(stateDiskDb) - blockDb := utils.MakeBlockDatabase(ctx, stack, true, false) - db.SetBlockStore(blockDb) } genesis, err = core.ReadGenesis(db) diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 51a8600710..3b9b0440ad 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -574,11 +574,8 @@ func checkStateContent(ctx *cli.Context) error { startTime = time.Now() lastLog = time.Now() ) - if stack.CheckIfMultiDataBase() { - it = rawdb.NewKeyLengthIterator(db.StateStore().NewIterator(prefix, start), 32) - } else { - it = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32) - } + + it = rawdb.NewKeyLengthIterator(db.GetStateStore().NewIterator(prefix, start), 32) for it.Next() { count++ k := it.Key() @@ -621,11 +618,9 @@ func dbStats(ctx *cli.Context) error { defer db.Close() showDBStats(db) - if stack.CheckIfMultiDataBase() { + if db.HasSeparateStateStore() { fmt.Println("show stats of state store") - showDBStats(db.StateStore()) - fmt.Println("show stats of block store") - showDBStats(db.BlockStore()) + showDBStats(db.GetStateStore()) } return nil @@ -640,11 +635,10 @@ func dbCompact(ctx *cli.Context) error { log.Info("Stats before compaction") showDBStats(db) + if stack.CheckIfMultiDataBase() { 
fmt.Println("show stats of state store") - showDBStats(db.StateStore()) - fmt.Println("show stats of block store") - showDBStats(db.BlockStore()) + showDBStats(db.GetStateStore()) } log.Info("Triggering compaction") @@ -654,11 +648,7 @@ func dbCompact(ctx *cli.Context) error { } if stack.CheckIfMultiDataBase() { - if err := db.StateStore().Compact(nil, nil); err != nil { - log.Error("Compact err", "error", err) - return err - } - if err := db.BlockStore().Compact(nil, nil); err != nil { + if err := db.GetStateStore().Compact(nil, nil); err != nil { log.Error("Compact err", "error", err) return err } @@ -668,9 +658,7 @@ func dbCompact(ctx *cli.Context) error { showDBStats(db) if stack.CheckIfMultiDataBase() { fmt.Println("show stats of state store after compaction") - showDBStats(db.StateStore()) - fmt.Println("show stats of block store after compaction") - showDBStats(db.BlockStore()) + showDBStats(db.GetStateStore()) } return nil } @@ -692,13 +680,8 @@ func dbGet(ctx *cli.Context) error { return err } opDb := db - if stack.CheckIfMultiDataBase() { - keyType := rawdb.DataTypeByKey(key) - if keyType == rawdb.StateDataType { - opDb = db.StateStore() - } else if keyType == rawdb.BlockDataType { - opDb = db.BlockStore() - } + if stack.CheckIfMultiDataBase() && rawdb.DataTypeByKey(key) == rawdb.StateDataType { + opDb = db.GetStateStore() } data, err := opDb.Get(key) @@ -720,11 +703,7 @@ func dbTrieGet(ctx *cli.Context) error { var db ethdb.Database chaindb := utils.MakeChainDatabase(ctx, stack, true, false) - if chaindb.StateStore() != nil { - db = chaindb.StateStore() - } else { - db = chaindb - } + db = chaindb.GetStateStore() defer chaindb.Close() scheme := ctx.String(utils.StateSchemeFlag.Name) @@ -792,11 +771,7 @@ func dbTrieDelete(ctx *cli.Context) error { var db ethdb.Database chaindb := utils.MakeChainDatabase(ctx, stack, true, false) - if chaindb.StateStore() != nil { - db = chaindb.StateStore() - } else { - db = chaindb - } + db = chaindb.GetStateStore() defer 
chaindb.Close() scheme := ctx.String(utils.StateSchemeFlag.Name) @@ -866,13 +841,8 @@ func dbDelete(ctx *cli.Context) error { return err } opDb := db - if stack.CheckIfMultiDataBase() { - keyType := rawdb.DataTypeByKey(key) - if keyType == rawdb.StateDataType { - opDb = db.StateStore() - } else if keyType == rawdb.BlockDataType { - opDb = db.BlockStore() - } + if opDb.HasSeparateStateStore() && rawdb.DataTypeByKey(key) == rawdb.StateDataType { + opDb = db.GetStateStore() } data, err := opDb.Get(key) @@ -904,7 +874,7 @@ func dbDeleteTrieState(ctx *cli.Context) error { ) // If separate trie db exists, delete all files in the db folder - if db.StateStore() != nil { + if db.HasSeparateStateStore() { statePath := filepath.Join(stack.ResolvePath("chaindata"), "state") log.Info("Removing separate trie database", "path", statePath) err = filepath.Walk(statePath, func(path string, info os.FileInfo, err error) error { @@ -991,13 +961,8 @@ func dbPut(ctx *cli.Context) error { } opDb := db - if stack.CheckIfMultiDataBase() { - keyType := rawdb.DataTypeByKey(key) - if keyType == rawdb.StateDataType { - opDb = db.StateStore() - } else if keyType == rawdb.BlockDataType { - opDb = db.BlockStore() - } + if db.HasSeparateStateStore() && rawdb.DataTypeByKey(key) == rawdb.StateDataType { + opDb = db.GetStateStore() } data, err = opDb.Get(key) @@ -1235,7 +1200,7 @@ func showMetaData(ctx *cli.Context) error { defer stack.Close() db := utils.MakeChainDatabase(ctx, stack, true, false) defer db.Close() - ancients, err := db.BlockStore().Ancients() + ancients, err := db.Ancients() if err != nil { fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err) } @@ -1282,17 +1247,12 @@ func hbss2pbss(ctx *cli.Context) error { defer stack.Close() db := utils.MakeChainDatabase(ctx, stack, false, false) - db.BlockStore().SyncAncient() - stateDiskDb := db.StateStore() + db.SyncAncient() defer db.Close() // convert hbss trie node to pbss trie node var lastStateID uint64 - if stateDiskDb != nil { - 
lastStateID = rawdb.ReadPersistentStateID(stateDiskDb) - } else { - lastStateID = rawdb.ReadPersistentStateID(db) - } + lastStateID = rawdb.ReadPersistentStateID(db.GetStateStore()) if lastStateID == 0 || force { config := triedb.HashDefaults triedb := triedb.NewDatabase(db, config) @@ -1343,19 +1303,14 @@ func hbss2pbss(ctx *cli.Context) error { log.Info("Convert hbss to pbss success. Nothing to do.") } - // repair state ancient offset - if stateDiskDb != nil { - lastStateID = rawdb.ReadPersistentStateID(stateDiskDb) - } else { - lastStateID = rawdb.ReadPersistentStateID(db) - } + lastStateID = rawdb.ReadPersistentStateID(db.GetStateStore()) if lastStateID == 0 { log.Error("Convert hbss to pbss trie node error. The last state id is still 0") } var ancient string - if db.StateStore() != nil { + if db.HasSeparateStateStore() { dirName := filepath.Join(stack.ResolvePath("chaindata"), "state") ancient = filepath.Join(dirName, "ancient") } else { @@ -1367,11 +1322,7 @@ func hbss2pbss(ctx *cli.Context) error { return err } // prune hbss trie node - if stateDiskDb != nil { - err = rawdb.PruneHashTrieNodeInDataBase(stateDiskDb) - } else { - err = rawdb.PruneHashTrieNodeInDataBase(db) - } + err = rawdb.PruneHashTrieNodeInDataBase(db.GetStateStore()) if err != nil { log.Error("Prune Hash trie node in database failed", "error", err) return err diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 930d6c42ae..4333d63fd0 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -99,8 +99,8 @@ var ( } MultiDataBaseFlag = &cli.BoolFlag{ Name: "multidatabase", - Usage: "Enable a separated state and block database, it will be created within two subdirectory called state and block, " + - "Users can copy this state or block directory to another directory or disk, and then create a symbolic link to the state directory under the chaindata", + Usage: "Enable a separated state database, it will be created in a subdirectory called state, " + + "Users can copy this state directory to 
another directory or disk, and then create a symbolic link to the state directory under the chaindata", Category: flags.EthCategory, } DirectBroadcastFlag = &cli.BoolFlag{ @@ -2567,8 +2567,6 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFree if stack.CheckIfMultiDataBase() && err == nil { stateDiskDb := MakeStateDataBase(ctx, stack, readonly, false) chainDb.SetStateStore(stateDiskDb) - blockDb := MakeBlockDatabase(ctx, stack, readonly, false) - chainDb.SetBlockStore(blockDb) } } if err != nil { @@ -2588,17 +2586,6 @@ func MakeStateDataBase(ctx *cli.Context, stack *node.Node, readonly, disableFree return statediskdb } -// MakeBlockDatabase open a separate block database using the flags passed to the client and will hard crash if it fails. -func MakeBlockDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFreeze bool) ethdb.Database { - cache := ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100 - handles := MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name)) / 10 - blockDb, err := stack.OpenDatabaseWithFreezer("chaindata/block", cache, handles, "", "", readonly, disableFreeze) - if err != nil { - Fatalf("Failed to open separate block database: %v", err) - } - return blockDb -} - func PathDBConfigAddJournalFilePath(stack *node.Node, config *pathdb.Config) *pathdb.Config { path := fmt.Sprintf("%s/%s", stack.ResolvePath("chaindata"), eth.JournalFileName) config.JournalFilePath = path diff --git a/core/blockchain.go b/core/blockchain.go index 15a0dbfd59..50d6233129 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -473,8 +473,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis } } // Ensure that a previous crash in SetHead doesn't leave extra ancients - if frozen, err := bc.db.BlockStore().ItemAmountInAncient(); err == nil && frozen > 0 { - frozen, err = bc.db.BlockStore().Ancients() + if frozen, err := bc.db.ItemAmountInAncient(); err == nil && frozen > 0 { + frozen, err = 
bc.db.Ancients() if err != nil { return nil, err } @@ -949,9 +949,9 @@ func (bc *BlockChain) rewindHead(head *types.Header, root common.Hash) (*types.H func (bc *BlockChain) SetFinalized(header *types.Header) { bc.currentFinalBlock.Store(header) if header != nil { - rawdb.WriteFinalizedBlockHash(bc.db.BlockStore(), header.Hash()) + rawdb.WriteFinalizedBlockHash(bc.db, header.Hash()) } else { - rawdb.WriteFinalizedBlockHash(bc.db.BlockStore(), common.Hash{}) + rawdb.WriteFinalizedBlockHash(bc.db, common.Hash{}) } } @@ -1046,7 +1046,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha // intent afterwards is full block importing, delete the chain segment // between the stateful-block and the sethead target. var wipe bool - frozen, _ := bc.db.BlockStore().Ancients() + frozen, _ := bc.db.Ancients() if headNumber+1 < frozen { wipe = pivot == nil || headNumber >= *pivot } @@ -1055,7 +1055,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha // Rewind the header chain, deleting all block bodies until then delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) { // Ignore the error here since light client won't hit this path - frozen, _ := bc.db.BlockStore().Ancients() + frozen, _ := bc.db.Ancients() if num+1 <= frozen { // The chain segment, such as the block header, canonical hash, // body, and receipt, will be removed from the ancient store @@ -1076,7 +1076,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha // If SetHead was only called as a chain reparation method, try to skip // touching the header chain altogether, unless the freezer is broken if repair { - if target, force := updateFn(bc.db.BlockStore(), bc.CurrentBlock()); force { + if target, force := updateFn(bc.db, bc.CurrentBlock()); force { bc.hc.SetHead(target.Number.Uint64(), nil, delFn) } } else { @@ -1173,7 +1173,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error { 
defer bc.chainmu.Unlock() // Prepare the genesis block and reinitialise the chain - blockBatch := bc.db.BlockStore().NewBatch() + blockBatch := bc.db.NewBatch() rawdb.WriteTd(blockBatch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()) rawdb.WriteBlock(blockBatch, genesis) if err := blockBatch.Write(); err != nil { @@ -1243,7 +1243,7 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) { go func() { defer bc.dbWg.Done() // Add the block to the canonical chain number scheme and mark as the head - blockBatch := bc.db.BlockStore().NewBatch() + blockBatch := bc.db.NewBatch() rawdb.WriteCanonicalHash(blockBatch, block.Hash(), block.NumberU64()) rawdb.WriteHeadHeaderHash(blockBatch, block.Hash()) rawdb.WriteHeadBlockHash(blockBatch, block.Hash()) @@ -1346,7 +1346,7 @@ func (bc *BlockChain) Stop() { } else { rawdb.WriteSafePointBlockNumber(bc.db, recent.NumberU64()) once.Do(func() { - rawdb.WriteHeadBlockHash(bc.db.BlockStore(), recent.Hash()) + rawdb.WriteHeadBlockHash(bc.db, recent.Hash()) }) } } @@ -1491,7 +1491,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ } else if !reorg { return false } - rawdb.WriteHeadFastBlockHash(bc.db.BlockStore(), head.Hash()) + rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) bc.currentSnapBlock.Store(head.Header()) headFastBlockGauge.Update(int64(head.NumberU64())) return true @@ -1508,9 +1508,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ // Ensure genesis is in ancients. 
if first.NumberU64() == 1 { - if frozen, _ := bc.db.BlockStore().Ancients(); frozen == 0 { + if frozen, _ := bc.db.Ancients(); frozen == 0 { td := bc.genesisBlock.Difficulty() - writeSize, err := rawdb.WriteAncientBlocks(bc.db.BlockStore(), []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, td) + writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, td) if err != nil { log.Error("Error writing genesis to ancients", "err", err) return 0, err @@ -1528,7 +1528,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ // Write all chain data to ancients. td := bc.GetTd(first.Hash(), first.NumberU64()) - writeSize, err := rawdb.WriteAncientBlocksWithBlobs(bc.db.BlockStore(), blockChain, receiptChain, td) + writeSize, err := rawdb.WriteAncientBlocksWithBlobs(bc.db, blockChain, receiptChain, td) if err != nil { log.Error("Error importing chain data to ancients", "err", err) return 0, err @@ -1536,7 +1536,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ size += writeSize // Sync the ancient store explicitly to ensure all data has been flushed to disk. - if err := bc.db.BlockStore().SyncAncient(); err != nil { + if err := bc.db.SyncAncient(); err != nil { return 0, err } // Update the current snap block because all block data is now present in DB. @@ -1544,7 +1544,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ if !updateHead(blockChain[len(blockChain)-1]) { // We end up here if the header chain has reorg'ed, and the blocks/receipts // don't match the canonical chain. 
- if _, err := bc.db.BlockStore().TruncateHead(previousSnapBlock + 1); err != nil { + if _, err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil { log.Error("Can't truncate ancient store after failed insert", "err", err) } return 0, errSideChainReceipts @@ -1552,7 +1552,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ // Delete block data from the main database. var ( - blockBatch = bc.db.BlockStore().NewBatch() + blockBatch = bc.db.NewBatch() canonHashes = make(map[common.Hash]struct{}, len(blockChain)) ) for _, block := range blockChain { @@ -1564,7 +1564,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ rawdb.DeleteBlockWithoutNumber(blockBatch, block.Hash(), block.NumberU64()) } // Delete side chain hash-to-number mappings. - for _, nh := range rawdb.ReadAllHashesInRange(bc.db.BlockStore(), first.NumberU64(), last.NumberU64()) { + for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) { if _, canon := canonHashes[nh.Hash]; !canon { rawdb.DeleteHeader(blockBatch, nh.Hash, nh.Number) } @@ -1581,7 +1581,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ var ( skipPresenceCheck = false batch = bc.db.NewBatch() - blockBatch = bc.db.BlockStore().NewBatch() + blockBatch = bc.db.NewBatch() ) for i, block := range blockChain { // Short circuit insertion if shutting down or processing failed @@ -1689,7 +1689,7 @@ func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (e if bc.insertStopped() { return errInsertionInterrupted } - blockBatch := bc.db.BlockStore().NewBatch() + blockBatch := bc.db.NewBatch() rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), td) rawdb.WriteBlock(blockBatch, block) // if cancun is enabled, here need to write sidecars too @@ -1735,7 +1735,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. 
defer wg.Wait() wg.Add(1) go func() { - blockBatch := bc.db.BlockStore().NewBatch() + blockBatch := bc.db.NewBatch() rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd) rawdb.WriteBlock(blockBatch, block) rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) @@ -1743,8 +1743,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. if bc.chainConfig.IsCancun(block.Number(), block.Time()) { rawdb.WriteBlobSidecars(blockBatch, block.Hash(), block.NumberU64(), block.Sidecars()) } - if bc.db.StateStore() != nil { - rawdb.WritePreimages(bc.db.StateStore(), statedb.Preimages()) + if bc.db.HasSeparateStateStore() { + rawdb.WritePreimages(bc.db.GetStateStore(), statedb.Preimages()) } else { rawdb.WritePreimages(blockBatch, statedb.Preimages()) } @@ -2152,7 +2152,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness // state, but if it's this special case here(skip reexecution) we will lose // the empty receipt entry. if len(block.Transactions()) == 0 { - rawdb.WriteReceipts(bc.db.BlockStore(), block.Hash(), block.NumberU64(), nil) + rawdb.WriteReceipts(bc.db, block.Hash(), block.NumberU64(), nil) } else { log.Error("Please file an issue, skip known block execution without receipt", "hash", block.Hash(), "number", block.NumberU64()) @@ -2804,7 +2804,7 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Header) error // transaction indexes, canonical chain indexes which above the head. 
var ( indexesBatch = bc.db.NewBatch() - blockBatch = bc.db.BlockStore().NewBatch() + blockBatch = bc.db.NewBatch() ) for _, tx := range types.HashDifference(deletedTxs, rebirthTxs) { rawdb.DeleteTxLookupEntry(indexesBatch, tx) @@ -3091,14 +3091,14 @@ func (bc *BlockChain) PruneBlockHistory(blockHistory uint64) error { return nil } pruneHeight := bestHeight - blockHistory - ancientHead, err := bc.db.BlockStore().Ancients() + ancientHead, err := bc.db.Ancients() if err != nil { return err } if pruneHeight > ancientHead { pruneHeight = ancientHead } - old, err := bc.db.BlockStore().TruncateTail(pruneHeight) + old, err := bc.db.TruncateTail(pruneHeight) if err != nil { return err } diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index b0df521fa7..c45085ceb7 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -534,7 +534,7 @@ func (bc *BlockChain) SubscribeFinalizedHeaderEvent(ch chan<- FinalizedHeaderEve // AncientTail retrieves the tail the ancients blocks func (bc *BlockChain) AncientTail() (uint64, error) { - tail, err := bc.db.BlockStore().Tail() + tail, err := bc.db.Tail() if err != nil { return 0, err } diff --git a/core/genesis.go b/core/genesis.go index 21cab33803..b2ec6e5b6d 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -585,13 +585,13 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo return nil, err } rawdb.WriteGenesisStateSpec(db, block.Hash(), blob) - rawdb.WriteTd(db.BlockStore(), block.Hash(), block.NumberU64(), block.Difficulty()) - rawdb.WriteBlock(db.BlockStore(), block) - rawdb.WriteReceipts(db.BlockStore(), block.Hash(), block.NumberU64(), nil) - rawdb.WriteCanonicalHash(db.BlockStore(), block.Hash(), block.NumberU64()) - rawdb.WriteHeadBlockHash(db.BlockStore(), block.Hash()) - rawdb.WriteHeadFastBlockHash(db.BlockStore(), block.Hash()) - rawdb.WriteHeadHeaderHash(db.BlockStore(), block.Hash()) + rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty()) 
+ rawdb.WriteBlock(db, block) + rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil) + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(db, block.Hash()) + rawdb.WriteHeadFastBlockHash(db, block.Hash()) + rawdb.WriteHeadHeaderHash(db, block.Hash()) rawdb.WriteChainConfig(db, block.Hash(), config) return block, nil } diff --git a/core/headerchain.go b/core/headerchain.go index cc0677fde6..b794386ebc 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -166,7 +166,7 @@ func (hc *HeaderChain) Reorg(headers []*types.Header) error { var ( first = headers[0] last = headers[len(headers)-1] - blockBatch = hc.chainDb.BlockStore().NewBatch() + blockBatch = hc.chainDb.NewBatch() ) if first.ParentHash != hc.currentHeaderHash { // Delete any canonical number assignments above the new head @@ -236,7 +236,7 @@ func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) { newTD = new(big.Int).Set(ptd) // Total difficulty of inserted chain inserted []rawdb.NumberHash // Ephemeral lookup of number/hash for the chain parentKnown = true // Set to true to force hc.HasHeader check the first iteration - blockBatch = hc.chainDb.BlockStore().NewBatch() + blockBatch = hc.chainDb.NewBatch() ) for i, header := range headers { var hash common.Hash @@ -630,7 +630,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat } var ( parentHash common.Hash - blockBatch = hc.chainDb.BlockStore().NewBatch() + blockBatch = hc.chainDb.NewBatch() origin = true ) done := func(header *types.Header) bool { @@ -656,7 +656,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat // first then remove the relative data from the database. // // Update head first(head fast block, head full block) before deleting the data. 
- markerBatch := hc.chainDb.BlockStore().NewBatch() + markerBatch := hc.chainDb.NewBatch() if updateFn != nil { newHead, force := updateFn(markerBatch, parent) if force && ((headTime > 0 && newHead.Time < headTime) || (headTime == 0 && newHead.Number.Uint64() < headBlock)) { @@ -679,7 +679,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat // we don't end up with dangling daps in the database var nums []uint64 if origin { - for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb.BlockStore(), n)) > 0; n++ { + for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ { nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path } origin = false @@ -689,7 +689,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat // Remove the related data from the database on all sidechains for _, num := range nums { // Gather all the side fork hashes - hashes := rawdb.ReadAllHashes(hc.chainDb.BlockStore(), num) + hashes := rawdb.ReadAllHashes(hc.chainDb, num) if len(hashes) == 0 { // No hashes in the database whatsoever, probably frozen already hashes = append(hashes, hdr.Hash()) @@ -735,7 +735,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat // Truncate the excessive chain segment above the current chain head // in the ancient store. 
if header.Number.Uint64()+1 < frozen { - _, err := hc.chainDb.BlockStore().TruncateHead(header.Number.Uint64() + 1) + _, err := hc.chainDb.TruncateHead(header.Number.Uint64() + 1) if err != nil { log.Crit("Failed to truncate head block", "err", err) } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 759ebbe56b..5b7d1674c9 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -33,23 +33,14 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -// Support Multi-Database Based on Data Pattern, the Chaindata will be divided into three stores: BlockStore, StateStore, and ChainStore, -// according to data schema and read/write behavior. When using the following data interfaces, you should take note of the following: -// -// 1) Block-Related Data: For CanonicalHash, Header, Body, Td, Receipts, and BlobSidecars, the Write, Delete, and Iterator -// operations should carefully ensure that the database being used is BlockStore. -// 2) Meta-Related Data: For HeaderNumber, HeadHeaderHash, HeadBlockHash, HeadFastBlockHash, and FinalizedBlockHash, the -// Write and Delete operations should carefully ensure that the database being used is BlockStore. -// 3) Ancient Data: When using a multi-database, Ancient data will use the BlockStore. - // ReadCanonicalHash retrieves the hash assigned to a canonical block number. func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash { var data []byte - db.BlockStoreReader().ReadAncients(func(reader ethdb.AncientReaderOp) error { + db.ReadAncients(func(reader ethdb.AncientReaderOp) error { data, _ = reader.Ancient(ChainFreezerHashTable, number) if len(data) == 0 { // Get it by hash from leveldb - data, _ = db.BlockStoreReader().Get(headerHashKey(number)) + data, _ = db.Get(headerHashKey(number)) } return nil }) @@ -152,8 +143,8 @@ func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int } // ReadHeaderNumber returns the header number assigned to a hash. 
-func ReadHeaderNumber(db ethdb.MultiDatabaseReader, hash common.Hash) *uint64 { - data, _ := db.BlockStoreReader().Get(headerNumberKey(hash)) +func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 { + data, _ := db.Get(headerNumberKey(hash)) if len(data) != 8 { return nil } @@ -178,8 +169,8 @@ func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) { } // ReadHeadHeaderHash retrieves the hash of the current canonical head header. -func ReadHeadHeaderHash(db ethdb.MultiDatabaseReader) common.Hash { - data, _ := db.BlockStoreReader().Get(headHeaderKey) +func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash { + data, _ := db.Get(headHeaderKey) if len(data) == 0 { return common.Hash{} } @@ -194,8 +185,8 @@ func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) { } // ReadHeadBlockHash retrieves the hash of the current canonical head block. -func ReadHeadBlockHash(db ethdb.MultiDatabaseReader) common.Hash { - data, _ := db.BlockStoreReader().Get(headBlockKey) +func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash { + data, _ := db.Get(headBlockKey) if len(data) == 0 { return common.Hash{} } @@ -210,8 +201,8 @@ func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) { } // ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block. -func ReadHeadFastBlockHash(db ethdb.MultiDatabaseReader) common.Hash { - data, _ := db.BlockStoreReader().Get(headFastBlockKey) +func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash { + data, _ := db.Get(headFastBlockKey) if len(data) == 0 { return common.Hash{} } @@ -226,8 +217,8 @@ func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) { } // ReadFinalizedBlockHash retrieves the hash of the finalized block. 
-func ReadFinalizedBlockHash(db ethdb.MultiDatabaseReader) common.Hash { - data, _ := db.BlockStoreReader().Get(headFinalizedBlockKey) +func ReadFinalizedBlockHash(db ethdb.KeyValueReader) common.Hash { + data, _ := db.Get(headFinalizedBlockKey) if len(data) == 0 { return common.Hash{} } @@ -305,13 +296,13 @@ func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValu // It's ok to request block 0, 1 item count = number + 1 } - limit, _ := db.BlockStoreReader().Ancients() + limit, _ := db.Ancients() // First read live blocks if i >= limit { // If we need to read live blocks, we need to figure out the hash first hash := ReadCanonicalHash(db, number) for ; i >= limit && count > 0; i-- { - if data, _ := db.BlockStoreReader().Get(headerKey(i, hash)); len(data) > 0 { + if data, _ := db.Get(headerKey(i, hash)); len(data) > 0 { rlpHeaders = append(rlpHeaders, data) // Get the parent hash for next query hash = types.HeaderParentHashFromRLP(data) @@ -325,7 +316,7 @@ func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValu return rlpHeaders } // read remaining from ancients, cap at 2M - data, err := db.BlockStoreReader().AncientRange(ChainFreezerHeaderTable, i+1-count, count, 2*1024*1024) + data, err := db.AncientRange(ChainFreezerHeaderTable, i+1-count, count, 2*1024*1024) if err != nil { log.Error("Failed to read headers from freezer", "err", err) return rlpHeaders @@ -344,7 +335,7 @@ func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValu // ReadHeaderRLP retrieves a block header in its raw RLP database encoding. func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { var data []byte - db.BlockStoreReader().ReadAncients(func(reader ethdb.AncientReaderOp) error { + db.ReadAncients(func(reader ethdb.AncientReaderOp) error { // First try to look up the data in ancient database. Extra hash // comparison is necessary since ancient database only maintains // the canonical data. 
@@ -353,7 +344,7 @@ func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValu return nil } // If not, try reading from leveldb - data, _ = db.BlockStoreReader().Get(headerKey(number, hash)) + data, _ = db.Get(headerKey(number, hash)) return nil }) return data @@ -361,10 +352,10 @@ func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValu // HasHeader verifies the existence of a block header corresponding to the hash. func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool { - if isCanon(db.BlockStoreReader(), number, hash) { + if isCanon(db, number, hash) { return true } - if has, err := db.BlockStoreReader().Has(headerKey(number, hash)); !has || err != nil { + if has, err := db.Has(headerKey(number, hash)); !has || err != nil { return false } return true @@ -451,14 +442,14 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue // comparison is necessary since ancient database only maintains // the canonical data. var data []byte - db.BlockStoreReader().ReadAncients(func(reader ethdb.AncientReaderOp) error { + db.ReadAncients(func(reader ethdb.AncientReaderOp) error { // Check if the data is in ancients if isCanon(reader, number, hash) { data, _ = reader.Ancient(ChainFreezerBodiesTable, number) return nil } // If not, try reading from leveldb - data, _ = db.BlockStoreReader().Get(blockBodyKey(number, hash)) + data, _ = db.Get(blockBodyKey(number, hash)) return nil }) return data @@ -468,7 +459,7 @@ func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue // block at number, in RLP encoding. 
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue { var data []byte - db.BlockStoreReader().ReadAncients(func(reader ethdb.AncientReaderOp) error { + db.ReadAncients(func(reader ethdb.AncientReaderOp) error { data, _ = reader.Ancient(ChainFreezerBodiesTable, number) if len(data) > 0 { return nil @@ -476,8 +467,8 @@ func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue { // Block is not in ancients, read from leveldb by hash and number. // Note: ReadCanonicalHash cannot be used here because it also // calls ReadAncients internally. - hash, _ := db.BlockStoreReader().Get(headerHashKey(number)) - data, _ = db.BlockStoreReader().Get(blockBodyKey(number, common.BytesToHash(hash))) + hash, _ := db.Get(headerHashKey(number)) + data, _ = db.Get(blockBodyKey(number, common.BytesToHash(hash))) return nil }) return data @@ -492,10 +483,10 @@ func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp // HasBody verifies the existence of a block body corresponding to the hash. func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool { - if isCanon(db.BlockStoreReader(), number, hash) { + if isCanon(db, number, hash) { return true } - if has, err := db.BlockStoreReader().Has(blockBodyKey(number, hash)); !has || err != nil { + if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil { return false } return true @@ -534,14 +525,14 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { // ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding. 
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { var data []byte - db.BlockStoreReader().ReadAncients(func(reader ethdb.AncientReaderOp) error { + db.ReadAncients(func(reader ethdb.AncientReaderOp) error { // Check if the data is in ancients if isCanon(reader, number, hash) { data, _ = reader.Ancient(ChainFreezerDifficultyTable, number) return nil } // If not, try reading from leveldb - data, _ = db.BlockStoreReader().Get(headerTDKey(number, hash)) + data, _ = db.Get(headerTDKey(number, hash)) return nil }) return data @@ -582,10 +573,10 @@ func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { // HasReceipts verifies the existence of all the transaction receipts belonging // to a block. func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool { - if isCanon(db.BlockStoreReader(), number, hash) { + if isCanon(db, number, hash) { return true } - if has, err := db.BlockStoreReader().Has(blockReceiptsKey(number, hash)); !has || err != nil { + if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil { return false } return true @@ -594,14 +585,14 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool { // ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding. 
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { var data []byte - db.BlockStoreReader().ReadAncients(func(reader ethdb.AncientReaderOp) error { + db.ReadAncients(func(reader ethdb.AncientReaderOp) error { // Check if the data is in ancients if isCanon(reader, number, hash) { data, _ = reader.Ancient(ChainFreezerReceiptTable, number) return nil } // If not, try reading from leveldb - data, _ = db.BlockStoreReader().Get(blockReceiptsKey(number, hash)) + data, _ = db.Get(blockReceiptsKey(number, hash)) return nil }) return data @@ -833,14 +824,14 @@ func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts // ReadBlobSidecarsRLP retrieves all the transaction blobs belonging to a block in RLP encoding. func ReadBlobSidecarsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { var data []byte - db.BlockStoreReader().ReadAncients(func(reader ethdb.AncientReaderOp) error { + db.ReadAncients(func(reader ethdb.AncientReaderOp) error { // Check if the data is in ancients if isCanon(reader, number, hash) { data, _ = reader.Ancient(ChainFreezerBlobSidecarTable, number) return nil } // If not, try reading from leveldb - data, _ = db.BlockStoreReader().Get(blockBlobSidecarsKey(number, hash)) + data, _ = db.Get(blockBlobSidecarsKey(number, hash)) return nil }) return data diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index f999c6979f..72da0de2fb 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -93,7 +93,7 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { infos = append(infos, info) case MerkleStateFreezerName, VerkleStateFreezerName: - if db.StateStore() != nil { + if db.HasSeparateStateStore() { continue } datadir, err := db.AncientDatadir() diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go index f82df9d969..90eb638506 100644 --- a/core/rawdb/chain_iterator.go +++ b/core/rawdb/chain_iterator.go @@ -35,16 +35,16 
@@ import ( // injects into the database the block hash->number mappings. func InitDatabaseFromFreezer(db ethdb.Database) { // If we can't access the freezer or it's empty, abort - frozen, err := db.BlockStore().ItemAmountInAncient() + frozen, err := db.ItemAmountInAncient() if err != nil || frozen == 0 { return } var ( - batch = db.BlockStore().NewBatch() + batch = db.NewBatch() start = time.Now() logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log hash common.Hash - offset = db.BlockStore().AncientOffSet() + offset = db.AncientOffSet() ) for i := uint64(0) + offset; i < frozen+offset; i++ { // We read 100K hashes at a time, for a total of 3.2M @@ -52,7 +52,7 @@ func InitDatabaseFromFreezer(db ethdb.Database) { if i+count > frozen+offset { count = frozen + offset - i } - data, err := db.BlockStore().AncientRange(ChainFreezerHashTable, i, count, 32*count) + data, err := db.AncientRange(ChainFreezerHashTable, i, count, 32*count) if err != nil { log.Crit("Failed to init database from freezer", "err", err) } @@ -80,8 +80,8 @@ func InitDatabaseFromFreezer(db ethdb.Database) { } batch.Reset() - WriteHeadHeaderHash(db.BlockStore(), hash) - WriteHeadFastBlockHash(db.BlockStore(), hash) + WriteHeadHeaderHash(db, hash) + WriteHeadFastBlockHash(db, hash) log.Info("Initialized database from freezer", "blocks", frozen, "elapsed", common.PrettyDuration(time.Since(start))) } @@ -100,7 +100,7 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool number uint64 rlp rlp.RawValue } - if offset := db.BlockStore().AncientOffSet(); offset > from { + if offset := db.AncientOffSet(); offset > from { from = offset } if to <= from { @@ -187,7 +187,7 @@ func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool // signal received. 
func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool, report bool) { // short circuit for invalid range - if offset := db.BlockStore().AncientOffSet(); offset > from { + if offset := db.AncientOffSet(); offset > from { from = offset } if from >= to { @@ -286,7 +286,7 @@ func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, inte // signal received. func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool, report bool) { // short circuit for invalid range - if offset := db.BlockStore().AncientOffSet(); offset > from { + if offset := db.AncientOffSet(); offset > from { from = offset } if from >= to { diff --git a/core/rawdb/database.go b/core/rawdb/database.go index ffbaa623ad..e5fce8db92 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -42,7 +42,6 @@ type freezerdb struct { ethdb.AncientFreezer stateStore ethdb.Database - blockStore ethdb.Database } func (frdb *freezerdb) StateStoreReader() ethdb.Reader { @@ -52,13 +51,6 @@ func (frdb *freezerdb) StateStoreReader() ethdb.Reader { return frdb.stateStore } -func (frdb *freezerdb) BlockStoreReader() ethdb.Reader { - if frdb.blockStore == nil { - return frdb - } - return frdb.blockStore -} - // AncientDatadir returns the path of root ancient directory. 
func (frdb *freezerdb) AncientDatadir() (string, error) { return frdb.ancientRoot, nil @@ -74,13 +66,8 @@ func (frdb *freezerdb) Close() error { if err := frdb.KeyValueStore.Close(); err != nil { errs = append(errs, err) } - if frdb.stateStore != nil { - if err := frdb.stateStore.Close(); err != nil { - errs = append(errs, err) - } - } - if frdb.blockStore != nil { - if err := frdb.blockStore.Close(); err != nil { + if frdb.HasSeparateStateStore() { + if err := frdb.GetStateStore().Close(); err != nil { errs = append(errs, err) } } @@ -90,17 +77,6 @@ func (frdb *freezerdb) Close() error { return nil } -func (frdb *freezerdb) StateStore() ethdb.Database { - return frdb.stateStore -} - -func (frdb *freezerdb) GetStateStore() ethdb.Database { - if frdb.stateStore != nil { - return frdb.stateStore - } - return frdb -} - func (frdb *freezerdb) SetStateStore(state ethdb.Database) { if frdb.stateStore != nil { frdb.stateStore.Close() @@ -108,23 +84,15 @@ func (frdb *freezerdb) SetStateStore(state ethdb.Database) { frdb.stateStore = state } -func (frdb *freezerdb) BlockStore() ethdb.Database { - if frdb.blockStore != nil { - return frdb.blockStore - } else { - return frdb - } -} - -func (frdb *freezerdb) SetBlockStore(block ethdb.Database) { - if frdb.blockStore != nil { - frdb.blockStore.Close() +func (frdb *freezerdb) GetStateStore() ethdb.Database { + if frdb.stateStore != nil { + return frdb.stateStore } - frdb.blockStore = block + return frdb } -func (frdb *freezerdb) HasSeparateBlockStore() bool { - return frdb.blockStore != nil +func (frdb *freezerdb) HasSeparateStateStore() bool { + return frdb.stateStore != nil } // Freeze is a helper method used for external testing to trigger and block until @@ -154,7 +122,6 @@ func (frdb *freezerdb) SetupFreezerEnv(env *ethdb.FreezerEnv, blockHistory uint6 type nofreezedb struct { ethdb.KeyValueStore stateStore ethdb.Database - blockStore ethdb.Database } // HasAncient returns an error as we don't have a backing chain freezer. 
@@ -222,10 +189,6 @@ func (db *nofreezedb) SyncAncient() error { return errNotSupported } -func (db *nofreezedb) StateStore() ethdb.Database { - return db.stateStore -} - func (db *nofreezedb) SetStateStore(state ethdb.Database) { db.stateStore = state } @@ -237,6 +200,10 @@ func (db *nofreezedb) GetStateStore() ethdb.Database { return db } +func (db *nofreezedb) HasSeparateStateStore() bool { + return db.stateStore != nil +} + func (db *nofreezedb) StateStoreReader() ethdb.Reader { if db.stateStore != nil { return db.stateStore @@ -244,28 +211,6 @@ func (db *nofreezedb) StateStoreReader() ethdb.Reader { return db } -func (db *nofreezedb) BlockStore() ethdb.Database { - if db.blockStore != nil { - return db.blockStore - } - return db -} - -func (db *nofreezedb) SetBlockStore(block ethdb.Database) { - db.blockStore = block -} - -func (db *nofreezedb) HasSeparateBlockStore() bool { - return db.blockStore != nil -} - -func (db *nofreezedb) BlockStoreReader() ethdb.Reader { - if db.blockStore != nil { - return db.blockStore - } - return db -} - func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) { // Unlike other ancient-related methods, this method does not return // errNotSupported when invoked. 
@@ -370,14 +315,10 @@ func (db *emptyfreezedb) SyncAncient() error { return nil } -func (db *emptyfreezedb) StateStore() ethdb.Database { return db } func (db *emptyfreezedb) GetStateStore() ethdb.Database { return db } func (db *emptyfreezedb) SetStateStore(state ethdb.Database) {} func (db *emptyfreezedb) StateStoreReader() ethdb.Reader { return db } -func (db *emptyfreezedb) BlockStore() ethdb.Database { return db } -func (db *emptyfreezedb) SetBlockStore(block ethdb.Database) {} -func (db *emptyfreezedb) HasSeparateBlockStore() bool { return false } -func (db *emptyfreezedb) BlockStoreReader() ethdb.Reader { return db } +func (db *emptyfreezedb) HasSeparateStateStore() bool { return false } func (db *emptyfreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) { return nil } @@ -626,11 +567,11 @@ func (s *stat) Count() string { } func AncientInspect(db ethdb.Database) error { - ancientTail, err := db.BlockStore().Tail() + ancientTail, err := db.Tail() if err != nil { return err } - ancientHead, err := db.BlockStore().Ancients() + ancientHead, err := db.Ancients() if err != nil { return err } @@ -674,7 +615,6 @@ type DataType int const ( StateDataType DataType = iota - BlockDataType ChainDataType Unknown ) @@ -688,14 +628,6 @@ func DataTypeByKey(key []byte) DataType { IsStorageTrieNode(key): return StateDataType - // block - case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength), - bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength), - bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength), - bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix), - bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix), - bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength): - return BlockDataType default: for _, meta := range [][]byte{ 
fastTrieProgressKey, persistentStateIDKey, trieJournalKey, snapSyncStatusFlagKey} { @@ -703,11 +635,6 @@ func DataTypeByKey(key []byte) DataType { return StateDataType } } - for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey, headBlockKey, headFastBlockKey} { - if bytes.Equal(key, meta) { - return BlockDataType - } - } return ChainDataType } } @@ -719,15 +646,11 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { defer it.Release() var trieIter ethdb.Iterator - var blockIter ethdb.Iterator - if db.StateStore() != nil { - trieIter = db.StateStore().NewIterator(keyPrefix, nil) + if db.HasSeparateStateStore() { + trieIter = db.GetStateStore().NewIterator(keyPrefix, nil) defer trieIter.Release() } - if db.HasSeparateBlockStore() { - blockIter = db.BlockStore().NewIterator(keyPrefix, nil) - defer blockIter.Release() - } + var ( count int64 start = time.Now() @@ -915,55 +838,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { } log.Info("Inspecting separate state database", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) } - // inspect separate block db - if blockIter != nil { - count = 0 - logged = time.Now() - - for blockIter.Next() { - var ( - key = blockIter.Key() - value = blockIter.Value() - size = common.StorageSize(len(key) + len(value)) - ) - total += size - - switch { - case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength): - headers.Add(size) - case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength): - bodies.Add(size) - case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength): - receipts.Add(size) - case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix): - tds.Add(size) - case bytes.HasPrefix(key, BlockBlobSidecarsPrefix): - blobSidecars.Add(size) - case bytes.HasPrefix(key, headerPrefix) && 
bytes.HasSuffix(key, headerHashSuffix): - numHashPairings.Add(size) - case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength): - hashNumPairings.Add(size) - default: - var accounted bool - for _, meta := range [][]byte{headHeaderKey, headFinalizedBlockKey, headBlockKey, headFastBlockKey} { - if bytes.Equal(key, meta) { - metadata.Add(size) - accounted = true - break - } - } - if !accounted { - unaccounted.Add(size) - } - } - count++ - if count%1000 == 0 && time.Since(logged) > 8*time.Second { - log.Info("Inspecting separate block database", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) - logged = time.Now() - } - } - log.Info("Inspecting separate block database", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) - } // Display the database statistic of key-value store. stats := [][]string{ {"Key-Value store", "Headers", headers.Size(), headers.Count()}, @@ -992,7 +866,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { {"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()}, } // Inspect all registered append-only file store then. 
- ancients, err := inspectFreezers(db.BlockStore()) + ancients, err := inspectFreezers(db) if err != nil { return err } @@ -1010,7 +884,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { // inspect ancient state in separate trie db if exist if trieIter != nil { - stateAncients, err := inspectFreezers(db.StateStore()) + stateAncients, err := inspectFreezers(db.GetStateStore()) if err != nil { return err } diff --git a/core/rawdb/table.go b/core/rawdb/table.go index ebc0673a58..8ce9ce0d1f 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -27,22 +27,6 @@ type table struct { prefix string } -func (t *table) BlockStoreReader() ethdb.Reader { - return t -} - -func (t *table) BlockStore() ethdb.Database { - return t -} - -func (t *table) SetBlockStore(block ethdb.Database) { - panic("not implement") -} - -func (t *table) HasSeparateBlockStore() bool { - panic("not implement") -} - // NewTable returns a database object that prefixes all keys with a given string. 
func NewTable(db ethdb.Database, prefix string) ethdb.Database { return &table{ @@ -237,10 +221,6 @@ func (t *table) NewBatch() ethdb.Batch { return &tableBatch{t.db.NewBatch(), t.prefix} } -func (t *table) StateStore() ethdb.Database { - return nil -} - func (t *table) SetStateStore(state ethdb.Database) { panic("not implement") } @@ -249,6 +229,10 @@ func (t *table) GetStateStore() ethdb.Database { return nil } +func (t *table) HasSeparateStateStore() bool { + return false +} + func (t *table) StateStoreReader() ethdb.Reader { return nil } diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 54ffc9965f..5ca8e4220b 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -140,8 +140,8 @@ func (p *Pruner) PruneAll(genesis *core.Genesis) error { func (p *Pruner) pruneAll(maindb ethdb.Database, g *core.Genesis) error { var pruneDB ethdb.Database - if maindb != nil && maindb.StateStore() != nil { - pruneDB = maindb.StateStore() + if maindb != nil && maindb.HasSeparateStateStore() { + pruneDB = maindb.GetStateStore() } else { pruneDB = maindb } @@ -236,8 +236,8 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta // dangling node is the state root is super low. So the dangling nodes in // theory will never ever be visited again. 
var pruneDB ethdb.Database - if maindb != nil && maindb.StateStore() != nil { - pruneDB = maindb.StateStore() + if maindb != nil && maindb.HasSeparateStateStore() { + pruneDB = maindb.GetStateStore() } else { pruneDB = maindb } @@ -386,8 +386,8 @@ func (p *Pruner) Prune(root common.Hash) error { } // if the separated state db has been set, use this db to prune data var trienodedb ethdb.Database - if p.db != nil && p.db.StateStore() != nil { - trienodedb = p.db.StateStore() + if p.db != nil && p.db.HasSeparateStateStore() { + trienodedb = p.db.GetStateStore() } else { trienodedb = p.db } diff --git a/eth/backend.go b/eth/backend.go index 06fca4f52d..965603e8d0 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -232,9 +232,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // startup ancient freeze freezeDb := chainDb - if stack.CheckIfMultiDataBase() { - freezeDb = chainDb.BlockStore() - } if err = freezeDb.SetupFreezerEnv(ðdb.FreezerEnv{ ChainCfg: chainConfig, BlobExtraReserve: config.BlobExtraReserve, diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 50e20a4536..1801ee09ea 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -557,8 +557,8 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd * } else { d.ancientLimit = 0 } - frozen, _ := d.stateDB.BlockStore().Ancients() // Ignore the error here since light client can also hit here. - itemAmountInAncient, _ := d.stateDB.BlockStore().ItemAmountInAncient() + frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here. + itemAmountInAncient, _ := d.stateDB.ItemAmountInAncient() // If a part of blockchain data has already been written into active store, // disable the ancient style insertion explicitly. 
if origin >= frozen && itemAmountInAncient != 0 { @@ -1645,9 +1645,9 @@ func (d *Downloader) reportSnapSyncProgress(force bool) { } // Don't report anything until we have a meaningful progress var ( - headerBytes, _ = d.stateDB.BlockStore().AncientSize(rawdb.ChainFreezerHeaderTable) - bodyBytes, _ = d.stateDB.BlockStore().AncientSize(rawdb.ChainFreezerBodiesTable) - receiptBytes, _ = d.stateDB.BlockStore().AncientSize(rawdb.ChainFreezerReceiptTable) + headerBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerHeaderTable) + bodyBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerBodiesTable) + receiptBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerReceiptTable) ) syncedBytes := common.StorageSize(headerBytes + bodyBytes + receiptBytes) if syncedBytes == 0 { diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go index 37c481e1a6..b6d56232a9 100644 --- a/eth/protocols/snap/sync.go +++ b/eth/protocols/snap/sync.go @@ -2049,7 +2049,7 @@ func (s *Syncer) processStorageResponse(res *storageResponse) { }, } var snapBatch ethdb.HookedBatch - if s.db.StateStore() != nil { + if s.db.HasSeparateStateStore() { usingMultDatabase = true snapBatch = ethdb.HookedBatch{ Batch: s.db.NewBatch(), @@ -2380,8 +2380,8 @@ func (s *Syncer) commitHealer(force bool) { batch := s.db.NewBatch() var stateBatch ethdb.Batch var err error - if s.db.StateStore() != nil { - stateBatch = s.db.StateStore().NewBatch() + if s.db.HasSeparateStateStore() { + stateBatch = s.db.GetStateStore().NewBatch() err = s.healer.scheduler.Commit(batch, stateBatch) } else { err = s.healer.scheduler.Commit(batch, nil) @@ -2392,7 +2392,7 @@ func (s *Syncer) commitHealer(force bool) { if err := batch.Write(); err != nil { log.Crit("Failed to persist healing data", "err", err) } - if s.db.StateStore() != nil { + if s.db.HasSeparateStateStore() { if err := stateBatch.Write(); err != nil { log.Crit("Failed to persist healing data", "err", err) } diff --git a/ethdb/database.go b/ethdb/database.go index 
de258b80d1..59e4f95917 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -190,29 +190,17 @@ type AncientStater interface { AncientDatadir() (string, error) } +// StateStoreReader wraps the StateStoreReader method. type StateStoreReader interface { StateStoreReader() Reader } -type BlockStoreReader interface { - BlockStoreReader() Reader -} - -// MultiDatabaseReader contains the methods required to read data from both key-value as well as -// blockStore or stateStore. -type MultiDatabaseReader interface { - KeyValueReader - StateStoreReader - BlockStoreReader -} - // Reader contains the methods required to read data from both key-value as well as // immutable ancient data. type Reader interface { KeyValueReader AncientReader StateStoreReader - BlockStoreReader } // AncientStore contains all the methods required to allow handling different @@ -225,15 +213,9 @@ type AncientStore interface { } type StateStore interface { - StateStore() Database SetStateStore(state Database) GetStateStore() Database -} - -type BlockStore interface { - BlockStore() Database - SetBlockStore(block Database) - HasSeparateBlockStore() bool + HasSeparateStateStore() bool } // ResettableAncientStore extends the AncientStore interface by adding a Reset method. @@ -248,9 +230,7 @@ type ResettableAncientStore interface { // only access the key-value data store but also the ancient chain store. 
type Database interface { StateStore - BlockStore StateStoreReader - BlockStoreReader AncientFreezer KeyValueStore diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go index 920dbbbb55..63bc81eb01 100644 --- a/ethdb/memorydb/memorydb.go +++ b/ethdb/memorydb/memorydb.go @@ -45,7 +45,6 @@ type Database struct { lock sync.RWMutex stateStore ethdb.Database - blockStore ethdb.Database } func (db *Database) ModifyAncients(f func(ethdb.AncientWriteOp) error) (int64, error) { @@ -301,13 +300,6 @@ func (db *Database) StateStoreReader() ethdb.Reader { return db.stateStore } -func (db *Database) BlockStoreReader() ethdb.Reader { - if db.blockStore == nil { - return db - } - return db.blockStore -} - // keyvalue is a key-value tuple tagged with a deletion field to allow creating // memory-database write batches. type keyvalue struct { diff --git a/ethdb/remotedb/remotedb.go b/ethdb/remotedb/remotedb.go index 11f4ab8c9b..88e4b01de4 100644 --- a/ethdb/remotedb/remotedb.go +++ b/ethdb/remotedb/remotedb.go @@ -32,22 +32,6 @@ type Database struct { remote *rpc.Client } -func (db *Database) BlockStoreReader() ethdb.Reader { - return db -} - -func (db *Database) BlockStore() ethdb.Database { - return db -} - -func (db *Database) HasSeparateBlockStore() bool { - return false -} - -func (db *Database) SetBlockStore(block ethdb.Database) { - panic("not supported") -} - func (db *Database) Has(key []byte) (bool, error) { if _, err := db.Get(key); err != nil { return false, nil @@ -102,15 +86,15 @@ func (db *Database) AncientSize(kind string) (uint64, error) { panic("not supported") } -func (db *Database) StateStore() ethdb.Database { +func (db *Database) SetStateStore(state ethdb.Database) { panic("not supported") } -func (db *Database) SetStateStore(state ethdb.Database) { +func (db *Database) GetStateStore() ethdb.Database { panic("not supported") } -func (db *Database) GetStateStore() ethdb.Database { +func (db *Database) HasSeparateStateStore() bool { panic("not 
supported") } diff --git a/internal/ethapi/dbapi.go b/internal/ethapi/dbapi.go index b891091b94..33fda936dc 100644 --- a/internal/ethapi/dbapi.go +++ b/internal/ethapi/dbapi.go @@ -33,11 +33,11 @@ func (api *DebugAPI) DbGet(key string) (hexutil.Bytes, error) { // DbAncient retrieves an ancient binary blob from the append-only immutable files. // It is a mapping to the `AncientReaderOp.Ancient` method func (api *DebugAPI) DbAncient(kind string, number uint64) (hexutil.Bytes, error) { - return api.b.ChainDb().BlockStore().Ancient(kind, number) + return api.b.ChainDb().Ancient(kind, number) } // DbAncients returns the ancient item numbers in the ancient store. // It is a mapping to the `AncientReaderOp.Ancients` method func (api *DebugAPI) DbAncients() (uint64, error) { - return api.b.ChainDb().BlockStore().Ancients() + return api.b.ChainDb().Ancients() } diff --git a/node/node.go b/node/node.go index 7e013f2acb..70f3b40c10 100644 --- a/node/node.go +++ b/node/node.go @@ -75,9 +75,6 @@ const ( initializingState = iota runningState closedState - blockDbCacheSize = 256 - blockDbHandlesMinSize = 1000 - blockDbHandlesMaxSize = 2000 chainDbMemoryPercentage = 50 chainDbHandlesPercentage = 50 ) @@ -777,9 +774,7 @@ func (n *Node) OpenAndMergeDatabase(name string, namespace string, readonly bool var ( err error stateDiskDb ethdb.Database - blockDb ethdb.Database disableChainDbFreeze = false - blockDbHandlesSize int chainDataHandles = config.DatabaseHandles chainDbCache = config.DatabaseCache stateDbCache, stateDbHandles int @@ -790,17 +785,12 @@ func (n *Node) OpenAndMergeDatabase(name string, namespace string, readonly bool if isMultiDatabase { // Resource allocation rules: // 1) Allocate a fixed percentage of memory for chainDb based on chainDbMemoryPercentage & chainDbHandlesPercentage. - // 2) Allocate a fixed size for blockDb based on blockDbCacheSize & blockDbHandlesSize. - // 3) Allocate the remaining resources to stateDb. 
+ // 2) Allocate the remaining resources to stateDb. chainDbCache = int(float64(config.DatabaseCache) * chainDbMemoryPercentage / 100) chainDataHandles = int(float64(config.DatabaseHandles) * chainDbHandlesPercentage / 100) - if config.DatabaseHandles/10 > blockDbHandlesMaxSize { - blockDbHandlesSize = blockDbHandlesMaxSize - } else { - blockDbHandlesSize = blockDbHandlesMinSize - } - stateDbCache = config.DatabaseCache - chainDbCache - blockDbCacheSize - stateDbHandles = config.DatabaseHandles - chainDataHandles - blockDbHandlesSize + + stateDbCache = config.DatabaseCache - chainDbCache + stateDbHandles = config.DatabaseHandles - chainDataHandles disableChainDbFreeze = true } @@ -816,13 +806,8 @@ func (n *Node) OpenAndMergeDatabase(name string, namespace string, readonly bool return nil, err } - blockDb, err = n.OpenDatabaseWithFreezer(name+"/block", blockDbCacheSize, blockDbHandlesSize, "", "eth/db/blockdata/", readonly, false) - if err != nil { - return nil, err - } log.Warn("Multi-database is an experimental feature") chainDB.SetStateStore(stateDiskDb) - chainDB.SetBlockStore(blockDb) } return chainDB, nil @@ -863,29 +848,15 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient, // CheckIfMultiDataBase check the state and block subdirectory of db, if subdirectory exists, return true func (n *Node) CheckIfMultiDataBase() bool { - var ( - stateExist = true - blockExist = true - ) + stateExist := true separateStateDir := filepath.Join(n.ResolvePath("chaindata"), "state") fileInfo, stateErr := os.Stat(separateStateDir) if os.IsNotExist(stateErr) || !fileInfo.IsDir() { stateExist = false } - separateBlockDir := filepath.Join(n.ResolvePath("chaindata"), "block") - blockFileInfo, blockErr := os.Stat(separateBlockDir) - if os.IsNotExist(blockErr) || !blockFileInfo.IsDir() { - blockExist = false - } - if stateExist && blockExist { - return true - } else if !stateExist && !blockExist { - return false - } else { - panic("data corruption! 
missing block or state dir.") - } + return stateExist } // ResolvePath returns the absolute path of a resource in the instance directory. diff --git a/trie/hbss2pbss.go b/trie/hbss2pbss.go index b728bdc2a6..0c2d90adf4 100644 --- a/trie/hbss2pbss.go +++ b/trie/hbss2pbss.go @@ -68,10 +68,10 @@ func (t *Trie) resloveWithoutTrack(n node, prefix []byte) (node, error) { func (h2p *Hbss2Pbss) writeNode(pathKey []byte, n *trienode.Node, owner common.Hash) { if owner == (common.Hash{}) { - rawdb.WriteAccountTrieNode(h2p.db.Disk(), pathKey, n.Blob) + rawdb.WriteAccountTrieNode(h2p.db.Disk().GetStateStore(), pathKey, n.Blob) log.Debug("WriteNodes account node, ", "path: ", common.Bytes2Hex(pathKey), "Hash: ", n.Hash, "BlobHash: ", crypto.Keccak256Hash(n.Blob)) } else { - rawdb.WriteStorageTrieNode(h2p.db.Disk(), owner, pathKey, n.Blob) + rawdb.WriteStorageTrieNode(h2p.db.Disk().GetStateStore(), owner, pathKey, n.Blob) log.Debug("WriteNodes storage node, ", "path: ", common.Bytes2Hex(pathKey), "owner: ", owner.String(), "Hash: ", n.Hash, "BlobHash: ", crypto.Keccak256Hash(n.Blob)) } } @@ -85,8 +85,8 @@ func (h2p *Hbss2Pbss) Run() { log.Info("Total", "complete", h2p.totalNum, "go routines Num", runtime.NumGoroutine, "h2p concurrentQueue", len(h2p.concurrentQueue)) - rawdb.WritePersistentStateID(h2p.db.Disk(), h2p.blocknum) - rawdb.WriteStateID(h2p.db.Disk(), h2p.stateRootHash, h2p.blocknum) + rawdb.WritePersistentStateID(h2p.db.Disk().GetStateStore(), h2p.blocknum) + rawdb.WriteStateID(h2p.db.Disk().GetStateStore(), h2p.stateRootHash, h2p.blocknum) } func (h2p *Hbss2Pbss) SubConcurrentTraversal(theTrie *Trie, theNode node, path []byte) { diff --git a/triedb/database.go b/triedb/database.go index 3568fa8872..da4e2b9ceb 100644 --- a/triedb/database.go +++ b/triedb/database.go @@ -97,11 +97,10 @@ type Database struct { func NewDatabase(diskdb ethdb.Database, config *Config) *Database { // Sanitize the config and use the default one if it's not specified. 
var triediskdb ethdb.Database - if diskdb != nil && diskdb.StateStore() != nil { - triediskdb = diskdb.StateStore() - } else { - triediskdb = diskdb + if diskdb != nil { + triediskdb = diskdb.GetStateStore() } + dbScheme := rawdb.ReadStateScheme(diskdb) if config == nil { if dbScheme == rawdb.PathScheme {