refine lock and fix racing issue (bnb-chain#18)
Co-authored-by: Sunny <[email protected]>
DavidZangNR and sunny2022da committed Aug 13, 2024
1 parent b2e4403 commit 7630e2a
Showing 4 changed files with 22 additions and 6 deletions.
4 changes: 2 additions & 2 deletions core/state/journal.go
@@ -183,9 +183,9 @@ func (ch resetObjectChange) revert(dber StateDBer) {
     }
 
     if !ch.prevdestruct {
-        s.snapParallelLock.Lock()
+        s.stateObjectDestructLock.Lock()
         s.removeStateObjectsDestruct(ch.prev.address)
-        s.snapParallelLock.Unlock()
+        s.stateObjectDestructLock.Unlock()
     }
     if ch.prevAccount != nil {
         s.accounts[ch.prev.addrHash] = ch.prevAccount
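The revert path above now takes the same dedicated mutex that the merge and conflict-check paths use, so every reader and writer of the block's destruct set agrees on one lock instead of borrowing snapParallelLock. A minimal, self-contained sketch of that pattern in Go (placeholder types, not the repository's actual declarations):

package state

import "sync"

// Address stands in for common.Address; the real map is keyed by account address.
type Address [20]byte

type stateDB struct {
    stateObjectDestructLock sync.RWMutex         // guards stateObjectsDestruct
    stateObjectsDestruct    map[Address]struct{} // accounts self-destructed within the block
}

// removeStateObjectsDestruct is what the journal revert calls; here the method
// takes the write lock itself, whereas in the diff the caller holds it.
func (s *stateDB) removeStateObjectsDestruct(addr Address) {
    s.stateObjectDestructLock.Lock()
    defer s.stateObjectDestructLock.Unlock()
    delete(s.stateObjectsDestruct, addr)
}

// hasDestruct is the read side, e.g. a conflict check from another goroutine.
func (s *stateDB) hasDestruct(addr Address) bool {
    s.stateObjectDestructLock.RLock()
    defer s.stateObjectDestructLock.RUnlock()
    _, ok := s.stateObjectsDestruct[addr]
    return ok
}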
4 changes: 3 additions & 1 deletion core/state/parallel_statedb.go
@@ -1463,10 +1463,11 @@ func (s *ParallelStateDB) FinaliseForParallel(deleteEmptyObjects bool, mainDB *S
             // We need to maintain account deletions explicitly (will remain
             // set indefinitely). Note only the first occurred self-destruct
             // event is tracked.
+            mainDB.stateObjectDestructLock.Lock()
             if _, ok := mainDB.stateObjectsDestruct[obj.address]; !ok {
                 mainDB.stateObjectsDestruct[obj.address] = obj.origin
             }
-
+            mainDB.stateObjectDestructLock.Unlock()
             // Note, we can't do this only at the end of a block because multiple
             // transactions within the same block might self destruct and then
             // resurrect an account; but the snapshotter needs both events.
@@ -1524,6 +1525,7 @@ func (s *ParallelStateDB) FinaliseForParallel(deleteEmptyObjects bool, mainDB *S
             // We need to maintain account deletions explicitly (will remain
             // set indefinitely). Note only the first occurred self-destruct
             // event is tracked.
+            // This is the thread local one, no need to acquire the stateObjectsDestructLock.
             if _, ok := s.stateObjectsDestruct[obj.address]; !ok {
                 s.stateObjectsDestruct[obj.address] = obj.origin
             }
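FinaliseForParallel runs on an execution slot but writes into the shared mainDB, so two slots finalising at the same time could race on the check-then-insert into mainDB.stateObjectsDestruct; that write/write race is what the new Lock/Unlock pair closes. The slot-local s.stateObjectsDestruct in the second hunk is only touched by its own goroutine, which is why it gains a comment rather than a lock. A hedged sketch of the guarded check-then-insert, reusing the placeholder Address type from the sketch above (the Account type and method name are illustrative only):

type Account struct{} // placeholder for the origin account payload

type mainStateDB struct {
    stateObjectDestructLock sync.RWMutex
    stateObjectsDestruct    map[Address]*Account
}

// trackDestructOnce mirrors the "if _, ok := ...; !ok" block in the diff:
// only the first self-destruct of an address within the block is recorded,
// and the whole test-and-set is atomic under the write lock.
func (m *mainStateDB) trackDestructOnce(addr Address, origin *Account) {
    m.stateObjectDestructLock.Lock()
    defer m.stateObjectDestructLock.Unlock()
    if _, ok := m.stateObjectsDestruct[addr]; !ok {
        m.stateObjectsDestruct[addr] = origin
    }
}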
9 changes: 6 additions & 3 deletions core/state/state_object.go
@@ -383,12 +383,12 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
     //    have been handles via pendingStorage above.
     // 2) we don't have new values, and can deliver empty response back
     //if _, destructed := s.db.stateObjectsDestruct[s.address]; destructed {
-    s.db.snapParallelLock.RLock()
+    s.db.stateObjectDestructLock.RLock()
     if _, destructed := s.db.getStateObjectsDegetstruct(s.address); destructed { // fixme: use sync.Map, instead of RWMutex?
-        s.db.snapParallelLock.RUnlock()
+        s.db.stateObjectDestructLock.RUnlock()
         return common.Hash{}
     }
-    s.db.snapParallelLock.RUnlock()
+    s.db.stateObjectDestructLock.RUnlock()
 
     // If no live objects are available, attempt to use snapshots
     var (
@@ -468,6 +468,8 @@ func (s *stateObject) setState(key, value common.Hash) {
 // finalise moves all dirty storage slots into the pending area to be hashed or
 // committed later. It is invoked at the end of every transaction.
 func (s *stateObject) finalise(prefetch bool) {
+    s.storageRecordsLock.Lock()
+    defer s.storageRecordsLock.Unlock()
     slotsToPrefetch := make([][]byte, 0, s.dirtyStorage.Length())
     s.dirtyStorage.Range(func(key, value interface{}) bool {
         s.pendingStorage.StoreValue(key.(common.Hash), value.(common.Hash))
@@ -478,6 +480,7 @@ func (s *stateObject) finalise(prefetch bool) {
         }
         return true
     })
+
     if s.dirtyNonce != nil {
         s.data.Nonce = *s.dirtyNonce
         s.dirtyNonce = nil
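Two things change in state_object.go: GetCommittedState now consults the destruct set under the read side of the dedicated lock, and finalise serialises the whole dirty-to-pending move behind storageRecordsLock so a concurrent reader never observes a half-migrated pendingStorage. A hedged sketch of the finalise part, with plain maps standing in for the repository's concurrent storage containers (treating storageRecordsLock as an RWMutex is also an assumption; the diff only shows Lock/Unlock):

type Hash [32]byte

type stateObjectSketch struct {
    storageRecordsLock sync.RWMutex // guards dirtyStorage and pendingStorage
    dirtyStorage       map[Hash]Hash
    pendingStorage     map[Hash]Hash
}

// finalise moves dirty slots into the pending area, as in the diff, but the
// entire move now happens under the write lock.
func (s *stateObjectSketch) finalise() {
    s.storageRecordsLock.Lock()
    defer s.storageRecordsLock.Unlock()
    for k, v := range s.dirtyStorage {
        s.pendingStorage[k] = v
    }
    s.dirtyStorage = make(map[Hash]Hash)
}

// pendingValue is the kind of reader that would otherwise race with finalise.
func (s *stateObjectSketch) pendingValue(k Hash) (Hash, bool) {
    s.storageRecordsLock.RLock()
    defer s.storageRecordsLock.RUnlock()
    v, ok := s.pendingStorage[k]
    return v, ok
}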
11 changes: 11 additions & 0 deletions core/state/statedb.go
@@ -110,7 +110,9 @@ func (s *StateDB) storeStateObj(addr common.Address, stateObject *stateObject) {
 // deleteStateObj is the entry for deleting state object to stateObjects in StateDB or stateObjects in parallel
 func (s *StateDB) deleteStateObj(addr common.Address) {
     if s.isParallel {
+        s.parallelStateAccessLock.Lock()
         s.parallel.stateObjects.Delete(addr)
+        s.parallelStateAccessLock.Unlock()
     } else {
         delete(s.stateObjects, addr)
     }
@@ -196,6 +198,7 @@ type StateDB struct {
     parallelStateAccessLock sync.RWMutex
     snapParallelLock        sync.RWMutex // for parallel mode, for main StateDB, slot will read snapshot, while processor will write.
     trieParallelLock        sync.Mutex   // for parallel mode of trie, mostly for get states/objects from trie, lock required to handle trie tracer.
+    stateObjectDestructLock sync.RWMutex // for parallel mode, used in mainDB for mergeSlot and conflict check.
     snapDestructs           map[common.Address]struct{}
     snapAccounts            map[common.Address][]byte
     snapStorage             map[common.Address]map[string][]byte
@@ -1024,10 +1027,12 @@ func (s *StateDB) createObject(addr common.Address) (newobj *stateObject) {
         // account and storage data should be cleared as well. Note, it must
         // be done here, otherwise the destruction event of "original account"
         // will be lost.
+        s.stateObjectDestructLock.Lock()
         _, prevdestruct := s.getStateObjectsDegetstruct(prev.address)
         if !prevdestruct {
             s.setStateObjectsDestruct(prev.address, prev.origin)
         }
+        s.stateObjectDestructLock.Unlock()
         // There may be some cached account/storage data already since IntermediateRoot
         // will be called for each transaction before byzantium fork which will always
         // cache the latest account/storage data.
@@ -1539,6 +1544,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
     addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))
 
     // finalise stateObjectsDestruct
+    // The finalise of stateDB is called at verify & commit phase, which is global, no need to acquire the lock.
     for addr, acc := range s.stateObjectsDestructDirty {
         s.stateObjectsDestruct[addr] = acc
     }
@@ -1573,6 +1579,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
             // We need to maintain account deletions explicitly (will remain
             // set indefinitely). Note only the first occurred self-destruct
             // event is tracked.
+            // The finalise of stateDB is called at verify & commit phase, which is global, no need to acquire the lock.
             if _, ok := s.stateObjectsDestruct[obj.address]; !ok {
                 s.stateObjectsDestruct[obj.address] = obj.origin
             }
@@ -1946,6 +1953,7 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
         return incomplete, nil
     }
 
+    // Commit phase, no need to acquire lock.
     for addr, prev := range s.stateObjectsDestruct {
         // The original account was non-existing, and it's marked as destructed
         // in the scope of block. It can be case (a) or (b).
@@ -2605,6 +2613,7 @@ func (s *StateDB) AddrPrefetch(slotDb *ParallelStateDB) {
 // finalized(dirty -> pending) on execution slot, the execution results should be
 // merged back to the main StateDB.
 func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receipt, txIndex int, fees *DelayedGasFee) *StateDB {
+
     s.SetTxContext(slotDb.thash, slotDb.txIndex)
 
     for s.nextRevisionId < slotDb.nextRevisionId {
@@ -2782,11 +2791,13 @@ func (s *StateDB) MergeSlotDB(slotDb *ParallelStateDB, slotReceipt *types.Receip
         }
     }
 
+    s.stateObjectDestructLock.Lock()
     for addr := range slotDb.stateObjectsDestruct {
         if acc, exist := s.stateObjectsDestruct[addr]; !exist {
             s.stateObjectsDestruct[addr] = acc
         }
     }
+    s.stateObjectDestructLock.Unlock()
     // slotDb.logs: logs will be kept in receipts, no need to do merge
     for hash, preimage := range slotDb.preimages {
         s.preimages[hash] = preimage
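Taken together, the statedb.go additions split the call sites into two groups: paths that can run concurrently with execution slots (deleteStateObj, createObject, MergeSlotDB) now take parallelStateAccessLock or stateObjectDestructLock, while Finalise and handleDestruction run in the single-threaded verify/commit phase and only gain comments recording why no lock is needed. A rough sketch of the merge step that copies a slot's destruct records into the main StateDB, reusing the placeholder types above and following the evident intent of the loop (keep only the first record per address); the real MergeSlotDB also merges accounts, storage, logs and gas fees:

// mergeDestructs copies the slot's destruct set into the main DB under the
// write lock, so it cannot race with a slot that is still executing.
func (m *mainStateDB) mergeDestructs(slot map[Address]*Account) {
    m.stateObjectDestructLock.Lock()
    defer m.stateObjectDestructLock.Unlock()
    for addr, acc := range slot {
        if _, exist := m.stateObjectsDestruct[addr]; !exist {
            m.stateObjectsDestruct[addr] = acc
        }
    }
}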
