Commit 1fc1e35

Revert "fix: the bug of blobsidecars and downloader with multi-database (bnb-chain#2564)"

This reverts commit 87e622e.
flywukong committed Jul 23, 2024
1 parent c6cb43b commit 1fc1e35
Showing 25 changed files with 188 additions and 371 deletions.
3 changes: 1 addition & 2 deletions cmd/geth/chaincmd.go
@@ -64,7 +64,6 @@ var (
         utils.CachePreimagesFlag,
         utils.OverrideBohr,
         utils.OverrideVerkle,
-        utils.MultiDataBaseFlag,
     }, utils.DatabaseFlags),
     Description: `
 The init command initializes a new genesis block and definition for the network.
@@ -760,7 +759,7 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
     arg := ctx.Args().First()
     if hashish(arg) {
         hash := common.HexToHash(arg)
-        if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
+        if number := rawdb.ReadHeaderNumber(db.BlockStore(), hash); number != nil {
             header = rawdb.ReadHeader(db, hash, *number)
         } else {
             return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
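For context on what this hunk toggles: with the multi-database layout, the header hash-to-number index lives in the store returned by db.BlockStore(), not in the base chain database, so a lookup through the wrong handle simply misses. Below is a minimal, self-contained Go sketch of that routing idea; the names multiDB, kvStore and readHeaderNumber are hypothetical stand-ins, not bsc's ethdb.Database or rawdb APIs.

```go
// Illustrative only: a toy "multi-database" lookup, not bsc's real ethdb types.
package main

import (
	"encoding/binary"
	"fmt"
)

// kvStore is a stand-in for a key-value backend.
type kvStore map[string][]byte

// multiDB routes block-related reads to a dedicated store when one is configured.
type multiDB struct {
	chain  kvStore // state, metadata, etc.
	blocks kvStore // headers, bodies, receipts (nil if multi-database is disabled)
}

// blockStore mirrors the db.BlockStore() accessor seen in the diff: it returns
// the dedicated block store if present, otherwise the main database.
func (m *multiDB) blockStore() kvStore {
	if m.blocks != nil {
		return m.blocks
	}
	return m.chain
}

// readHeaderNumber is a toy version of rawdb.ReadHeaderNumber: with the
// multi-database layout the hash-to-number mapping lives in the block store,
// so the caller must pass the right handle.
func readHeaderNumber(store kvStore, hash string) *uint64 {
	enc, ok := store["H"+hash]
	if !ok {
		return nil
	}
	n := binary.BigEndian.Uint64(enc)
	return &n
}

func main() {
	db := &multiDB{chain: kvStore{}, blocks: kvStore{}}
	num := make([]byte, 8)
	binary.BigEndian.PutUint64(num, 40_000_000)
	db.blocks["H0xabc"] = num // header-number mapping stored in the block store

	// Reading from the wrong handle misses the entry; reading via blockStore() finds it.
	fmt.Println(readHeaderNumber(db.chain, "0xabc"))         // <nil>
	fmt.Println(*readHeaderNumber(db.blockStore(), "0xabc")) // 40000000
}
```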
12 changes: 6 additions & 6 deletions cmd/geth/dbcmd.go
@@ -397,8 +397,8 @@ func inspectTrie(ctx *cli.Context) error {
     var headerBlockHash common.Hash
     if ctx.NArg() >= 1 {
         if ctx.Args().Get(0) == "latest" {
-            headerHash := rawdb.ReadHeadHeaderHash(db)
-            blockNumber = *(rawdb.ReadHeaderNumber(db, headerHash))
+            headerHash := rawdb.ReadHeadHeaderHash(db.BlockStore())
+            blockNumber = *(rawdb.ReadHeaderNumber(db.BlockStore(), headerHash))
         } else if ctx.Args().Get(0) == "snapshot" {
             trieRootHash = rawdb.ReadSnapshotRoot(db)
             blockNumber = math.MaxUint64
@@ -1212,7 +1212,7 @@ func showMetaData(ctx *cli.Context) error {
     if err != nil {
         fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
     }
-    data := rawdb.ReadChainMetadata(db)
+    data := rawdb.ReadChainMetadataFromMultiDatabase(db)
     data = append(data, []string{"frozen", fmt.Sprintf("%d items", ancients)})
     data = append(data, []string{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))})
     if b := rawdb.ReadHeadBlock(db); b != nil {
@@ -1255,7 +1255,7 @@ func hbss2pbss(ctx *cli.Context) error {
     defer stack.Close()
 
     db := utils.MakeChainDatabase(ctx, stack, false, false)
-    db.BlockStore().Sync()
+    db.Sync()
     stateDiskDb := db.StateStore()
     defer db.Close()
 
@@ -1273,8 +1273,8 @@ func hbss2pbss(ctx *cli.Context) error {
     log.Info("hbss2pbss triedb", "scheme", triedb.Scheme())
     defer triedb.Close()
 
-    headerHash := rawdb.ReadHeadHeaderHash(db)
-    blockNumber := rawdb.ReadHeaderNumber(db, headerHash)
+    headerHash := rawdb.ReadHeadHeaderHash(db.BlockStore())
+    blockNumber := rawdb.ReadHeaderNumber(db.BlockStore(), headerHash)
     if blockNumber == nil {
         log.Error("read header number failed.")
         return fmt.Errorf("read header number failed")
1 change: 0 additions & 1 deletion cmd/geth/main.go
@@ -125,7 +125,6 @@ var (
     utils.CacheSnapshotFlag,
     // utils.CacheNoPrefetchFlag,
     utils.CachePreimagesFlag,
-    utils.MultiDataBaseFlag,
     utils.PersistDiffFlag,
     utils.DiffBlockFlag,
     utils.PruneAncientDataFlag,
1 change: 1 addition & 0 deletions cmd/utils/flags.go
@@ -1153,6 +1153,7 @@ var (
         DBEngineFlag,
         StateSchemeFlag,
         HttpHeaderFlag,
+        MultiDataBaseFlag,
     }
 )
 
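The net effect of the flag hunks above (cmd/geth/chaincmd.go, cmd/geth/main.go and this one) is that MultiDataBaseFlag moves out of the per-command flag lists and into the shared DatabaseFlags group, so every command that merges DatabaseFlags exposes it automatically. A hedged sketch of that grouping pattern with urfave/cli/v2 (the flag definitions and the merge helper here are illustrative, not the repository's actual declarations):

```go
// Sketch of flag-group composition; names are illustrative stand-ins for
// utils.MultiDataBaseFlag and utils.DatabaseFlags.
package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

var (
	multiDatabaseFlag = &cli.BoolFlag{
		Name:  "multidatabase",
		Usage: "Enable a separate block store and state store (illustrative)",
	}

	// databaseFlags is the shared group: once a flag lives here, every command
	// that merges the group exposes it, so it no longer needs to be listed per
	// command, which is the shuffle this revert performs.
	databaseFlags = []cli.Flag{multiDatabaseFlag}
)

// merge concatenates flag groups into one slice for a command definition.
func merge(groups ...[]cli.Flag) []cli.Flag {
	var out []cli.Flag
	for _, g := range groups {
		out = append(out, g...)
	}
	return out
}

func main() {
	initCmdFlags := merge([]cli.Flag{
		&cli.BoolFlag{Name: "cache.preimages"},
	}, databaseFlags)
	for _, f := range initCmdFlags {
		fmt.Println(f.Names()[0]) // cache.preimages, multidatabase
	}
}
```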
30 changes: 15 additions & 15 deletions core/blockchain.go
@@ -462,8 +462,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
         }
     }
     // Ensure that a previous crash in SetHead doesn't leave extra ancients
-    if frozen, err := bc.db.BlockStore().ItemAmountInAncient(); err == nil && frozen > 0 {
-        frozen, err = bc.db.BlockStore().Ancients()
+    if frozen, err := bc.db.ItemAmountInAncient(); err == nil && frozen > 0 {
+        frozen, err = bc.db.Ancients()
         if err != nil {
             return nil, err
         }
@@ -663,7 +663,7 @@ func (bc *BlockChain) cacheDiffLayer(diffLayer *types.DiffLayer, diffLayerCh cha
 // into node seamlessly.
 func (bc *BlockChain) empty() bool {
     genesis := bc.genesisBlock.Hash()
-    for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db), rawdb.ReadHeadHeaderHash(bc.db), rawdb.ReadHeadFastBlockHash(bc.db)} {
+    for _, hash := range []common.Hash{rawdb.ReadHeadBlockHash(bc.db.BlockStore()), rawdb.ReadHeadHeaderHash(bc.db.BlockStore()), rawdb.ReadHeadFastBlockHash(bc.db.BlockStore())} {
         if hash != genesis {
             return false
         }
@@ -699,7 +699,7 @@ func (bc *BlockChain) getFinalizedNumber(header *types.Header) uint64 {
 // assumes that the chain manager mutex is held.
 func (bc *BlockChain) loadLastState() error {
     // Restore the last known head block
-    head := rawdb.ReadHeadBlockHash(bc.db)
+    head := rawdb.ReadHeadBlockHash(bc.db.BlockStore())
     if head == (common.Hash{}) {
         // Corrupt or empty database, init from scratch
         log.Warn("Empty database, resetting chain")
@@ -721,7 +721,7 @@ func (bc *BlockChain) loadLastState() error {
 
     // Restore the last known head header
     headHeader := headBlock.Header()
-    if head := rawdb.ReadHeadHeaderHash(bc.db); head != (common.Hash{}) {
+    if head := rawdb.ReadHeadHeaderHash(bc.db.BlockStore()); head != (common.Hash{}) {
         if header := bc.GetHeaderByHash(head); header != nil {
             headHeader = header
         }
@@ -732,7 +732,7 @@ func (bc *BlockChain) loadLastState() error {
     bc.currentSnapBlock.Store(headBlock.Header())
     headFastBlockGauge.Update(int64(headBlock.NumberU64()))
 
-    if head := rawdb.ReadHeadFastBlockHash(bc.db); head != (common.Hash{}) {
+    if head := rawdb.ReadHeadFastBlockHash(bc.db.BlockStore()); head != (common.Hash{}) {
         if block := bc.GetBlockByHash(head); block != nil {
             bc.currentSnapBlock.Store(block.Header())
             headFastBlockGauge.Update(int64(block.NumberU64()))
@@ -1100,7 +1100,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
             // intent afterwards is full block importing, delete the chain segment
             // between the stateful-block and the sethead target.
             var wipe bool
-            frozen, _ := bc.db.BlockStore().Ancients()
+            frozen, _ := bc.db.Ancients()
             if headNumber+1 < frozen {
                 wipe = pivot == nil || headNumber >= *pivot
             }
@@ -1109,11 +1109,11 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
     // Rewind the header chain, deleting all block bodies until then
     delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
         // Ignore the error here since light client won't hit this path
-        frozen, _ := bc.db.BlockStore().Ancients()
+        frozen, _ := bc.db.Ancients()
         if num+1 <= frozen {
             // Truncate all relative data(header, total difficulty, body, receipt
             // and canonical hash) from ancient store.
-            if _, err := bc.db.BlockStore().TruncateHead(num); err != nil {
+            if _, err := bc.db.TruncateHead(num); err != nil {
                 log.Crit("Failed to truncate ancient data", "number", num, "err", err)
             }
             // Remove the hash <-> number mapping from the active store.
@@ -1556,9 +1556,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 
         // Ensure genesis is in ancients.
         if first.NumberU64() == 1 {
-            if frozen, _ := bc.db.BlockStore().Ancients(); frozen == 0 {
+            if frozen, _ := bc.db.Ancients(); frozen == 0 {
                 td := bc.genesisBlock.Difficulty()
-                writeSize, err := rawdb.WriteAncientBlocks(bc.db.BlockStore(), []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, td)
+                writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, td)
                 if err != nil {
                     log.Error("Error writing genesis to ancients", "err", err)
                     return 0, err
@@ -1576,23 +1576,23 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 
         // Write all chain data to ancients.
         td := bc.GetTd(first.Hash(), first.NumberU64())
-        writeSize, err := rawdb.WriteAncientBlocksWithBlobs(bc.db.BlockStore(), blockChain, receiptChain, td)
+        writeSize, err := rawdb.WriteAncientBlocksWithBlobs(bc.db, blockChain, receiptChain, td)
         if err != nil {
             log.Error("Error importing chain data to ancients", "err", err)
             return 0, err
         }
         size += writeSize
 
         // Sync the ancient store explicitly to ensure all data has been flushed to disk.
-        if err := bc.db.BlockStore().Sync(); err != nil {
+        if err := bc.db.Sync(); err != nil {
             return 0, err
         }
         // Update the current snap block because all block data is now present in DB.
         previousSnapBlock := bc.CurrentSnapBlock().Number.Uint64()
         if !updateHead(blockChain[len(blockChain)-1]) {
             // We end up here if the header chain has reorg'ed, and the blocks/receipts
             // don't match the canonical chain.
-            if _, err := bc.db.BlockStore().TruncateHead(previousSnapBlock + 1); err != nil {
+            if _, err := bc.db.TruncateHead(previousSnapBlock + 1); err != nil {
                 log.Error("Can't truncate ancient store after failed insert", "err", err)
             }
             return 0, errSideChainReceipts
@@ -1612,7 +1612,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
             rawdb.DeleteBlockWithoutNumber(blockBatch, block.Hash(), block.NumberU64())
         }
         // Delete side chain hash-to-number mappings.
-        for _, nh := range rawdb.ReadAllHashesInRange(bc.db.BlockStore(), first.NumberU64(), last.NumberU64()) {
+        for _, nh := range rawdb.ReadAllHashesInRange(bc.db, first.NumberU64(), last.NumberU64()) {
             if _, canon := canonHashes[nh.Hash]; !canon {
                 rawdb.DeleteHeader(blockBatch, nh.Hash, nh.Number)
             }
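The blockchain.go hunks move freezer calls such as Ancients(), ItemAmountInAncient(), TruncateHead() and Sync() between bc.db and bc.db.BlockStore(). The sketch below, built on toy types rather than bsc's ethdb interfaces, shows the assumption under which the two spellings are interchangeable: the top-level database must forward ancient-store operations to whichever store actually holds the freezer. Which handle receives these calls is exactly what this revert flips, so treat this as an illustration of the coupling, not of bsc's actual wrapper behavior.

```go
// Minimal sketch with hypothetical types (ancientStore, freezer, wrappedDB).
package main

import "fmt"

// ancientStore is the subset of freezer operations touched by the diff.
type ancientStore interface {
	Ancients() (uint64, error)
	TruncateHead(n uint64) (uint64, error)
	Sync() error
}

// freezer is a toy ancient store that only tracks how many items are frozen.
type freezer struct{ frozen uint64 }

func (f *freezer) Ancients() (uint64, error)             { return f.frozen, nil }
func (f *freezer) TruncateHead(n uint64) (uint64, error) { f.frozen = n; return n, nil }
func (f *freezer) Sync() error                           { return nil }

// wrappedDB models the top-level database: it owns a separate block store and
// forwards freezer calls to it, so db.Ancients() and db.BlockStore().Ancients()
// report the same value only because of this delegation.
type wrappedDB struct{ blocks *freezer }

func (w *wrappedDB) BlockStore() ancientStore              { return w.blocks }
func (w *wrappedDB) Ancients() (uint64, error)             { return w.blocks.Ancients() }
func (w *wrappedDB) TruncateHead(n uint64) (uint64, error) { return w.blocks.TruncateHead(n) }
func (w *wrappedDB) Sync() error                           { return w.blocks.Sync() }

func main() {
	db := &wrappedDB{blocks: &freezer{frozen: 128}}
	direct, _ := db.BlockStore().Ancients()
	forwarded, _ := db.Ancients()
	fmt.Println(direct, forwarded) // 128 128: equivalent only because the wrapper forwards
}
```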
4 changes: 2 additions & 2 deletions core/blockchain_reader.go
@@ -231,7 +231,7 @@ func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
     if receipts, ok := bc.receiptsCache.Get(hash); ok {
         return receipts
     }
-    number := rawdb.ReadHeaderNumber(bc.db, hash)
+    number := rawdb.ReadHeaderNumber(bc.db.BlockStore(), hash)
     if number == nil {
         return nil
     }
@@ -514,7 +514,7 @@ func (bc *BlockChain) SubscribeFinalizedHeaderEvent(ch chan<- FinalizedHeaderEve
 
 // AncientTail retrieves the tail the ancients blocks
 func (bc *BlockChain) AncientTail() (uint64, error) {
-    tail, err := bc.db.BlockStore().Tail()
+    tail, err := bc.db.Tail()
     if err != nil {
         return 0, err
     }
4 changes: 2 additions & 2 deletions core/chain_indexer.go
@@ -227,8 +227,8 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainH
             // Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then)
             // TODO(karalabe, zsfelfoldi): This seems a bit brittle, can we detect this case explicitly?
 
-            if rawdb.ReadCanonicalHash(c.chainDb, prevHeader.Number.Uint64()) != prevHash {
-                if h := rawdb.FindCommonAncestor(c.chainDb, prevHeader, header); h != nil {
+            if rawdb.ReadCanonicalHash(c.chainDb.BlockStore(), prevHeader.Number.Uint64()) != prevHash {
+                if h := rawdb.FindCommonAncestor(c.chainDb.BlockStore(), prevHeader, header); h != nil {
                     c.newHead(h.Number.Uint64(), true)
                 }
             }
8 changes: 4 additions & 4 deletions core/headerchain.go
@@ -97,7 +97,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
         return nil, ErrNoGenesis
     }
     hc.currentHeader.Store(hc.genesisHeader)
-    if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
+    if head := rawdb.ReadHeadBlockHash(chainDb.BlockStore()); head != (common.Hash{}) {
         if chead := hc.GetHeaderByHash(head); chead != nil {
             hc.currentHeader.Store(chead)
         }
@@ -144,7 +144,7 @@ func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
     if cached, ok := hc.numberCache.Get(hash); ok {
         return &cached
     }
-    number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
+    number := rawdb.ReadHeaderNumber(hc.chainDb.BlockStore(), hash)
     if number != nil {
         hc.numberCache.Add(hash, *number)
     }
@@ -691,7 +691,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
         // we don't end up with dangling daps in the database
         var nums []uint64
         if origin {
-            for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb.BlockStore(), n)) > 0; n++ {
+            for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
                 nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
             }
             origin = false
@@ -701,7 +701,7 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
         // Remove the related data from the database on all sidechains
         for _, num := range nums {
             // Gather all the side fork hashes
-            hashes := rawdb.ReadAllHashes(hc.chainDb.BlockStore(), num)
+            hashes := rawdb.ReadAllHashes(hc.chainDb, num)
             if len(hashes) == 0 {
                 // No hashes in the database whatsoever, probably frozen already
                 hashes = append(hashes, hdr.Hash())
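GetBlockNumber in the second hunk above is a cache-aside read: check hc.numberCache, fall back to the database (here routed back through the block store), and populate the cache only after a successful disk read. A toy version with a plain map standing in for the real LRU cache and for rawdb.ReadHeaderNumber (all names here are illustrative):

```go
// Cache-aside sketch; headerIndex, cache and disk are hypothetical stand-ins.
package main

import "fmt"

type headerIndex struct {
	cache map[string]uint64 // stand-in for hc.numberCache
	disk  map[string]uint64 // stand-in for the block store
}

func (h *headerIndex) getBlockNumber(hash string) *uint64 {
	if n, ok := h.cache[hash]; ok { // fast path: cached mapping
		return &n
	}
	n, ok := h.disk[hash] // slow path: hit the database
	if !ok {
		return nil // unknown hash: nothing to cache
	}
	h.cache[hash] = n // populate the cache only on a successful read
	return &n
}

func main() {
	idx := &headerIndex{cache: map[string]uint64{}, disk: map[string]uint64{"0xabc": 42}}
	fmt.Println(*idx.getBlockNumber("0xabc")) // 42, now cached
	fmt.Println(idx.getBlockNumber("0xdef"))  // <nil>
}
```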