fix testcase and lint
make diff block configable

wait code write

fix testcase

resolve comments

resolve comment
unclezoro committed Sep 26, 2021
1 parent 1fbecfa commit 92e21cc
Showing 35 changed files with 159 additions and 302 deletions.
4 changes: 2 additions & 2 deletions cmd/faucet/faucet.go
@@ -139,7 +139,7 @@ func main() {
log.Crit("Length of bep2eContracts, bep2eSymbols, bep2eAmounts mismatch")
}

bep2eInfos := make(map[string]bep2eInfo, 0)
bep2eInfos := make(map[string]bep2eInfo, len(symbols))
for idx, s := range symbols {
n, ok := big.NewInt(0).SetString(bep2eNumAmounts[idx], 10)
if !ok {
@@ -148,7 +148,7 @@ func main() {
amountStr := big.NewFloat(0).Quo(big.NewFloat(0).SetInt(n), big.NewFloat(0).SetInt64(params.Ether)).String()

bep2eInfos[s] = bep2eInfo{
Contract: common.HexToAddress(contracts[idx]),
Contract: common.HexToAddress(contracts[idx]),
Amount: *n,
AmountStr: amountStr,
}
1 change: 1 addition & 0 deletions cmd/geth/main.go
@@ -117,6 +117,7 @@ var (
utils.CacheSnapshotFlag,
utils.CachePreimagesFlag,
utils.PersistDiffFlag,
utils.DiffBlockFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
8 changes: 8 additions & 0 deletions cmd/utils/flags.go
@@ -442,6 +442,11 @@ var (
Name: "persistdiff",
Usage: "Enable persistence of the diff layer",
}
DiffBlockFlag = cli.Uint64Flag{
Name: "diffblock",
Usage: "The number of blocks should be persisted in db (default = 864000 )",
Value: uint64(864000),
}
// Miner settings
MiningEnabledFlag = cli.BoolFlag{
Name: "mine",
@@ -1590,6 +1595,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.GlobalIsSet(PersistDiffFlag.Name) {
cfg.PersistDiff = ctx.GlobalBool(PersistDiffFlag.Name)
}
if ctx.GlobalIsSet(DiffBlockFlag.Name) {
cfg.DiffBlock = ctx.GlobalUint64(DiffBlockFlag.Name)
}
if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
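Taken together, the two hunks above add a --diffblock knob next to the existing --persistdiff switch and copy its value into the eth config. As a hedged illustration only (the flag names and the 864000 default come from this diff, the exact command line is an assumption), a node operator would enable bounded diff-layer persistence with something like: geth --persistdiff --diffblock 864000.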
6 changes: 3 additions & 3 deletions consensus/parlia/parlia.go
@@ -800,8 +800,9 @@ func (p *Parlia) Delay(chain consensus.ChainReader, header *types.Header) *time.
}
delay := p.delayForRamanujanFork(snap, header)
// The blocking time should be no more than half of period
if delay > time.Duration(p.config.Period)*time.Second/2 {
delay = time.Duration(p.config.Period) * time.Second / 2
half := time.Duration(p.config.Period) * time.Second / 2
if delay > half {
delay = half
}
return &delay
}
@@ -895,7 +896,6 @@ func (p *Parlia) AllowLightProcess(chain consensus.ChainReader, currentHeader *t
idx := snap.indexOfVal(p.val)
// validator is not allowed to diff sync
return idx < 0

}

func (p *Parlia) IsLocalBlock(header *types.Header) bool {
2 changes: 1 addition & 1 deletion consensus/parlia/ramanujanfork.go
@@ -21,7 +21,7 @@ func (p *Parlia) delayForRamanujanFork(snap *Snapshot, header *types.Header) tim
if header.Difficulty.Cmp(diffNoTurn) == 0 {
// It's not our turn explicitly to sign, delay it a bit
wiggle := time.Duration(len(snap.Validators)/2+1) * wiggleTimeBeforeFork
delay += time.Duration(fixedBackOffTimeBeforeFork) + time.Duration(rand.Int63n(int64(wiggle)))
delay += fixedBackOffTimeBeforeFork + time.Duration(rand.Int63n(int64(wiggle)))
}
return delay
}
2 changes: 1 addition & 1 deletion consensus/parlia/snapshot.go
@@ -256,7 +256,7 @@ func (s *Snapshot) enoughDistance(validator common.Address, header *types.Header
if validator == header.Coinbase {
return false
}
offset := (int64(s.Number) + 1) % int64(validatorNum)
offset := (int64(s.Number) + 1) % validatorNum
if int64(idx) >= offset {
return int64(idx)-offset >= validatorNum-2
} else {
47 changes: 26 additions & 21 deletions core/blockchain.go
@@ -91,7 +91,6 @@ const (
maxBeyondBlocks = 2048

diffLayerFreezerRecheckInterval = 3 * time.Second
diffLayerFreezerBlockLimit = 864000 // The number of diff layers that should be kept in disk.
diffLayerPruneRecheckInterval = 1 * time.Second // The interval to prune unverified diff layers
maxDiffQueueDist = 2048 // Maximum allowed distance from the chain head to queue diffLayers
maxDiffLimit = 2048 // Maximum number of unique diff layers a peer may have responded
@@ -213,9 +212,11 @@ type BlockChain struct {
futureBlocks *lru.Cache // future blocks are blocks added for later processing

// trusted diff layers
diffLayerCache *lru.Cache // Cache for the diffLayers
diffLayerRLPCache *lru.Cache // Cache for the rlp encoded diffLayers
diffQueue *prque.Prque // A Priority queue to store recent diff layer
diffLayerCache *lru.Cache // Cache for the diffLayers
diffLayerRLPCache *lru.Cache // Cache for the rlp encoded diffLayers
diffQueue *prque.Prque // A Priority queue to store recent diff layer
diffQueueBuffer chan *types.DiffLayer
diffLayerFreezerBlockLimit uint64

// untrusted diff layers
diffMux sync.RWMutex
@@ -285,6 +286,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
engine: engine,
vmConfig: vmConfig,
diffQueue: prque.New(nil),
diffQueueBuffer: make(chan *types.DiffLayer),
blockHashToDiffLayers: make(map[common.Hash]map[common.Hash]*types.DiffLayer),
diffHashToBlockHash: make(map[common.Hash]common.Hash),
diffHashToPeers: make(map[common.Hash]map[string]struct{}),
@@ -444,7 +446,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
}
// Need persist and prune diff layer
if bc.db.DiffStore() != nil {
go bc.trustedDiffLayerFreezeLoop()
go bc.trustedDiffLayerLoop()
}
go bc.untrustedDiffLayerPruneLoop()

@@ -464,7 +466,7 @@ func (bc *BlockChain) cacheDiffLayer(diffLayer *types.DiffLayer) {
bc.diffLayerCache.Add(diffLayer.BlockHash, diffLayer)
if bc.db.DiffStore() != nil {
// push to priority queue before persisting
bc.diffQueue.Push(diffLayer, -(int64(diffLayer.Number)))
bc.diffQueueBuffer <- diffLayer
}
}

@@ -967,7 +969,7 @@ func (bc *BlockChain) GetDiffLayerRLP(blockHash common.Hash) rlp.RawValue {
}
rawData := rawdb.ReadDiffLayerRLP(diffStore, blockHash)
if len(rawData) != 0 {
bc.diffLayerRLPCache.Add(blockHash, rlp.RawValue(rawData))
bc.diffLayerRLPCache.Add(blockHash, rawData)
}
return rawData
}
@@ -2009,8 +2011,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them
snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates

blockExecutionTimer.Update(time.Since(substart))

@@ -2401,12 +2401,14 @@ func (bc *BlockChain) update() {
}
}

func (bc *BlockChain) trustedDiffLayerFreezeLoop() {
func (bc *BlockChain) trustedDiffLayerLoop() {
recheck := time.Tick(diffLayerFreezerRecheckInterval)
bc.wg.Add(1)
defer bc.wg.Done()
for {
select {
case diff := <-bc.diffQueueBuffer:
bc.diffQueue.Push(diff, -(int64(diff.Number)))
case <-bc.quit:
// Persist all diffLayers when shutdown, it will introduce redundant storage, but it is acceptable.
// If the client been ungracefully shutdown, it will missing all cached diff layers, it is acceptable as well.
@@ -2451,7 +2453,7 @@ func (bc *BlockChain) trustedDiffLayerFreezeLoop() {
batch = bc.db.DiffStore().NewBatch()
}
rawdb.WriteDiffLayer(batch, diffLayer.BlockHash, diffLayer)
staleHash := bc.GetCanonicalHash(uint64(-prio) - diffLayerFreezerBlockLimit)
staleHash := bc.GetCanonicalHash(uint64(-prio) - bc.diffLayerFreezerBlockLimit)
rawdb.DeleteDiffLayer(batch, staleHash)
}
} else {
@@ -2511,16 +2513,12 @@ func (bc *BlockChain) removeDiffLayers(diffHash common.Hash) {
// Untrusted peers
pids := bc.diffHashToPeers[diffHash]
invalidDiffHashes := make(map[common.Hash]struct{})
if pids != nil {
for pid := range pids {
invaliDiffHashesPeer := bc.diffPeersToDiffHashes[pid]
if invaliDiffHashesPeer != nil {
for invaliDiffHash := range invaliDiffHashesPeer {
invalidDiffHashes[invaliDiffHash] = struct{}{}
}
}
delete(bc.diffPeersToDiffHashes, pid)
for pid := range pids {
invaliDiffHashesPeer := bc.diffPeersToDiffHashes[pid]
for invaliDiffHash := range invaliDiffHashesPeer {
invalidDiffHashes[invaliDiffHash] = struct{}{}
}
delete(bc.diffPeersToDiffHashes, pid)
}
for invalidDiffHash := range invalidDiffHashes {
delete(bc.diffHashToPeers, invalidDiffHash)
@@ -2602,7 +2600,7 @@ func (bc *BlockChain) pruneDiffLayer() {
break
}
}
staleDiffHashes := make(map[common.Hash]struct{}, 0)
staleDiffHashes := make(map[common.Hash]struct{})
for blockHash := range staleBlockHashes {
if diffHashes, exist := bc.blockHashToDiffLayers[blockHash]; exist {
for diffHash := range diffHashes {
@@ -2932,3 +2930,10 @@ func EnableLightProcessor(bc *BlockChain) *BlockChain {
bc.processor = NewLightStateProcessor(bc.Config(), bc, bc.engine)
return bc
}

func EnablePersistDiff(limit uint64) BlockChainOption {
return func(chain *BlockChain) *BlockChain {
chain.diffLayerFreezerBlockLimit = limit
return chain
}
}
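EnablePersistDiff is the new functional option that carries the --diffblock limit into the BlockChain; the updated test in the next file shows it in use. Below is a rough, self-contained sketch only — the in-memory database, empty genesis, and import paths are assumptions modeled on core/blockchain_diff_test.go, not part of this commit:

package main

import (
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Assumed setup: an in-memory database and a minimal test genesis,
	// mirroring newTestBackendWithGenerator in core/blockchain_diff_test.go.
	db := rawdb.NewMemoryDatabase()
	(&core.Genesis{
		Config: params.TestChainConfig,
		Alloc:  core.GenesisAlloc{},
	}).MustCommit(db)

	// EnablePersistDiff bounds how many recent blocks keep their diff layers
	// in the diff store; 864000 matches the new --diffblock default.
	// Note: persistence only kicks in when the database actually has a diff
	// store attached (bc.db.DiffStore() != nil in the loop above).
	chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(),
		vm.Config{}, nil, nil, core.EnablePersistDiff(864000))
	if err != nil {
		panic(err)
	}
	defer chain.Stop()
}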
2 changes: 1 addition & 1 deletion core/blockchain_diff_test.go
@@ -72,7 +72,7 @@ func newTestBackendWithGenerator(blocks int, lightProcess bool) *testBackend {
Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}},
}).MustCommit(db)

chain, _ := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
chain, _ := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil, EnablePersistDiff(860000))
generator := func(i int, block *BlockGen) {
// The chain maker doesn't have access to a chain, so the difficulty will be
// lets unset (nil). Set it here to the correct value.
4 changes: 2 additions & 2 deletions core/blockchain_test.go
@@ -1769,7 +1769,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
}

lastPrunedIndex := len(blocks) - TestTriesInMemory - 1
lastPrunedBlock := blocks[lastPrunedIndex]
lastPrunedBlock := blocks[lastPrunedIndex-1]
firstNonPrunedBlock := blocks[len(blocks)-TestTriesInMemory]

// Verify pruning of lastPrunedBlock
@@ -2420,7 +2420,7 @@ func TestSideImportPrunedBlocks(t *testing.T) {
}

lastPrunedIndex := len(blocks) - TestTriesInMemory - 1
lastPrunedBlock := blocks[lastPrunedIndex]
lastPrunedBlock := blocks[lastPrunedIndex-1]

// Verify pruning of lastPrunedBlock
if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
60 changes: 3 additions & 57 deletions core/rawdb/freezer_table_test.go
@@ -18,13 +18,10 @@ package rawdb

import (
"bytes"
"encoding/binary"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sync"
"testing"
"time"

@@ -528,7 +525,6 @@ func TestOffset(t *testing.T) {

f.Append(4, getChunk(20, 0xbb))
f.Append(5, getChunk(20, 0xaa))
f.DumpIndex(0, 100)
f.Close()
}
// Now crop it.
@@ -575,7 +571,6 @@ func TestOffset(t *testing.T) {
if err != nil {
t.Fatal(err)
}
f.DumpIndex(0, 100)
// It should allow writing item 6
f.Append(numDeleted+2, getChunk(20, 0x99))

@@ -640,55 +635,6 @@ func TestOffset(t *testing.T) {
// 1. have data files d0, d1, d2, d3
// 2. remove d2,d3
//
// However, all 'normal' failure modes arising due to failing to sync() or save a file
// should be handled already, and the case described above can only (?) happen if an
// external process/user deletes files from the filesystem.

// TestAppendTruncateParallel is a test to check if the Append/truncate operations are
// racy.
//
// The reason why it's not a regular fuzzer, within tests/fuzzers, is that it is dependent
// on timing rather than 'clever' input -- there's no determinism.
func TestAppendTruncateParallel(t *testing.T) {
dir, err := ioutil.TempDir("", "freezer")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)

f, err := newCustomTable(dir, "tmp", metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, 8, true)
if err != nil {
t.Fatal(err)
}

fill := func(mark uint64) []byte {
data := make([]byte, 8)
binary.LittleEndian.PutUint64(data, mark)
return data
}

for i := 0; i < 5000; i++ {
f.truncate(0)
data0 := fill(0)
f.Append(0, data0)
data1 := fill(1)

var wg sync.WaitGroup
wg.Add(2)
go func() {
f.truncate(0)
wg.Done()
}()
go func() {
f.Append(1, data1)
wg.Done()
}()
wg.Wait()

if have, err := f.Retrieve(0); err == nil {
if !bytes.Equal(have, data0) {
t.Fatalf("have %x want %x", have, data0)
}
}
}
}
// However, all 'normal' failure modes arising due to failing to sync() or save a file should be
// handled already, and the case described above can only (?) happen if an external process/user
// deletes files from the filesystem.
1 change: 0 additions & 1 deletion core/state/database.go
@@ -243,7 +243,6 @@ func (db *cachingDB) CacheStorage(addrHash common.Hash, root common.Hash, t Trie
triesArray := [3]*triePair{{root: root, trie: tr.ResetCopy()}, nil, nil}
db.storageTrieCache.Add(addrHash, triesArray)
}
return
}

func (db *cachingDB) Purge() {
8 changes: 4 additions & 4 deletions core/state/snapshot/disklayer_test.go
@@ -121,7 +121,7 @@ func TestDiskMerge(t *testing.T) {
base.Storage(conNukeCache, conNukeCacheSlot)

// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
if err := snaps.update(diffRoot, baseRoot, map[common.Hash]struct{}{
accDelNoCache: {},
accDelCache: {},
conNukeNoCache: {},
@@ -344,7 +344,7 @@ func TestDiskPartialMerge(t *testing.T) {
assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
if err := snaps.update(diffRoot, baseRoot, map[common.Hash]struct{}{
accDelNoCache: {},
accDelCache: {},
conNukeNoCache: {},
@@ -466,7 +466,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
},
}
// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
if err := snaps.update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
accTwo: accTwo[:],
}, nil); err != nil {
t.Fatalf("failed to update snapshot tree: %v", err)
@@ -484,7 +484,7 @@
}
// Test scenario 2, the disk layer is fully generated
// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
if err := snaps.update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
accThree: accThree.Bytes(),
}, map[common.Hash]map[common.Hash][]byte{
accThree: {accThreeSlot: accThreeSlot.Bytes()},
(diff for the remaining changed files not shown)
