Skip to content

Commit

Permalink
fix testcase and lint
Browse files — browse the repository at this point in the history
Branch information: committed by realuncle on Sep 24, 2021
1 parent 1fbecfa commit 8cc1c3d
Show file tree
Hide file tree
Showing 25 changed files with 95 additions and 270 deletions.
2 changes: 1 addition & 1 deletion cmd/faucet/faucet.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ func main() {
log.Crit("Length of bep2eContracts, bep2eSymbols, bep2eAmounts mismatch")
}

bep2eInfos := make(map[string]bep2eInfo, 0)
bep2eInfos := make(map[string]bep2eInfo)
for idx, s := range symbols {
n, ok := big.NewInt(0).SetString(bep2eNumAmounts[idx], 10)
if !ok {
Expand Down
2 changes: 1 addition & 1 deletion consensus/parlia/ramanujanfork.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ func (p *Parlia) delayForRamanujanFork(snap *Snapshot, header *types.Header) tim
if header.Difficulty.Cmp(diffNoTurn) == 0 {
// It's not our turn explicitly to sign, delay it a bit
wiggle := time.Duration(len(snap.Validators)/2+1) * wiggleTimeBeforeFork
delay += time.Duration(fixedBackOffTimeBeforeFork) + time.Duration(rand.Int63n(int64(wiggle)))
delay += fixedBackOffTimeBeforeFork + time.Duration(rand.Int63n(int64(wiggle)))
}
return delay
}
Expand Down
2 changes: 1 addition & 1 deletion consensus/parlia/snapshot.go
Original file line number Diff line number Diff line change
Expand Up @@ -256,7 +256,7 @@ func (s *Snapshot) enoughDistance(validator common.Address, header *types.Header
if validator == header.Coinbase {
return false
}
offset := (int64(s.Number) + 1) % int64(validatorNum)
offset := (int64(s.Number) + 1) % validatorNum
if int64(idx) >= offset {
return int64(idx)-offset >= validatorNum-2
} else {
Expand Down
20 changes: 7 additions & 13 deletions core/blockchain.go
Original file line number Diff line number Diff line change
Expand Up @@ -967,7 +967,7 @@ func (bc *BlockChain) GetDiffLayerRLP(blockHash common.Hash) rlp.RawValue {
}
rawData := rawdb.ReadDiffLayerRLP(diffStore, blockHash)
if len(rawData) != 0 {
bc.diffLayerRLPCache.Add(blockHash, rlp.RawValue(rawData))
bc.diffLayerRLPCache.Add(blockHash, rawData)
}
return rawData
}
Expand Down Expand Up @@ -2009,8 +2009,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them
snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them
snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them
trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates
trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates

blockExecutionTimer.Update(time.Since(substart))

Expand Down Expand Up @@ -2511,16 +2509,12 @@ func (bc *BlockChain) removeDiffLayers(diffHash common.Hash) {
// Untrusted peers
pids := bc.diffHashToPeers[diffHash]
invalidDiffHashes := make(map[common.Hash]struct{})
if pids != nil {
for pid := range pids {
invaliDiffHashesPeer := bc.diffPeersToDiffHashes[pid]
if invaliDiffHashesPeer != nil {
for invaliDiffHash := range invaliDiffHashesPeer {
invalidDiffHashes[invaliDiffHash] = struct{}{}
}
}
delete(bc.diffPeersToDiffHashes, pid)
for pid := range pids {
invaliDiffHashesPeer := bc.diffPeersToDiffHashes[pid]
for invaliDiffHash := range invaliDiffHashesPeer {
invalidDiffHashes[invaliDiffHash] = struct{}{}
}
delete(bc.diffPeersToDiffHashes, pid)
}
for invalidDiffHash := range invalidDiffHashes {
delete(bc.diffHashToPeers, invalidDiffHash)
Expand Down Expand Up @@ -2602,7 +2596,7 @@ func (bc *BlockChain) pruneDiffLayer() {
break
}
}
staleDiffHashes := make(map[common.Hash]struct{}, 0)
staleDiffHashes := make(map[common.Hash]struct{})
for blockHash := range staleBlockHashes {
if diffHashes, exist := bc.blockHashToDiffLayers[blockHash]; exist {
for diffHash := range diffHashes {
Expand Down
60 changes: 3 additions & 57 deletions core/rawdb/freezer_table_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,10 @@ package rawdb

import (
"bytes"
"encoding/binary"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sync"
"testing"
"time"

Expand Down Expand Up @@ -528,7 +525,6 @@ func TestOffset(t *testing.T) {

f.Append(4, getChunk(20, 0xbb))
f.Append(5, getChunk(20, 0xaa))
f.DumpIndex(0, 100)
f.Close()
}
// Now crop it.
Expand Down Expand Up @@ -575,7 +571,6 @@ func TestOffset(t *testing.T) {
if err != nil {
t.Fatal(err)
}
f.DumpIndex(0, 100)
// It should allow writing item 6
f.Append(numDeleted+2, getChunk(20, 0x99))

Expand Down Expand Up @@ -640,55 +635,6 @@ func TestOffset(t *testing.T) {
// 1. have data files d0, d1, d2, d3
// 2. remove d2,d3
//
// However, all 'normal' failure modes arising due to failing to sync() or save a file
// should be handled already, and the case described above can only (?) happen if an
// external process/user deletes files from the filesystem.

// TestAppendTruncateParallel is a test to check if the Append/truncate operations are
// racy.
//
// The reason why it's not a regular fuzzer, within tests/fuzzers, is that it is dependent
// on timing rather than 'clever' input -- there's no determinism.
func TestAppendTruncateParallel(t *testing.T) {
dir, err := ioutil.TempDir("", "freezer")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)

f, err := newCustomTable(dir, "tmp", metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, 8, true)
if err != nil {
t.Fatal(err)
}

fill := func(mark uint64) []byte {
data := make([]byte, 8)
binary.LittleEndian.PutUint64(data, mark)
return data
}

for i := 0; i < 5000; i++ {
f.truncate(0)
data0 := fill(0)
f.Append(0, data0)
data1 := fill(1)

var wg sync.WaitGroup
wg.Add(2)
go func() {
f.truncate(0)
wg.Done()
}()
go func() {
f.Append(1, data1)
wg.Done()
}()
wg.Wait()

if have, err := f.Retrieve(0); err == nil {
if !bytes.Equal(have, data0) {
t.Fatalf("have %x want %x", have, data0)
}
}
}
}
// However, all 'normal' failure modes arising due to failing to sync() or save a file should be
// handled already, and the case described above can only (?) happen if an external process/user
// deletes files from the filesystem.
1 change: 0 additions & 1 deletion core/state/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -243,7 +243,6 @@ func (db *cachingDB) CacheStorage(addrHash common.Hash, root common.Hash, t Trie
triesArray := [3]*triePair{{root: root, trie: tr.ResetCopy()}, nil, nil}
db.storageTrieCache.Add(addrHash, triesArray)
}
return
}

func (db *cachingDB) Purge() {
Expand Down
8 changes: 4 additions & 4 deletions core/state/snapshot/disklayer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ func TestDiskMerge(t *testing.T) {
base.Storage(conNukeCache, conNukeCacheSlot)

// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
if err := snaps.update(diffRoot, baseRoot, map[common.Hash]struct{}{
accDelNoCache: {},
accDelCache: {},
conNukeNoCache: {},
Expand Down Expand Up @@ -344,7 +344,7 @@ func TestDiskPartialMerge(t *testing.T) {
assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, map[common.Hash]struct{}{
if err := snaps.update(diffRoot, baseRoot, map[common.Hash]struct{}{
accDelNoCache: {},
accDelCache: {},
conNukeNoCache: {},
Expand Down Expand Up @@ -466,7 +466,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
},
}
// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
if err := snaps.update(diffRoot, baseRoot, nil, map[common.Hash][]byte{
accTwo: accTwo[:],
}, nil); err != nil {
t.Fatalf("failed to update snapshot tree: %v", err)
Expand All @@ -484,7 +484,7 @@ func TestDiskGeneratorPersistence(t *testing.T) {
}
// Test scenario 2, the disk layer is fully generated
// Modify or delete some accounts, flatten everything onto disk
if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
if err := snaps.update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{
accThree: accThree.Bytes(),
}, map[common.Hash]map[common.Hash][]byte{
accThree: {accThreeSlot: accThreeSlot.Bytes()},
Expand Down
Loading

0 comments on commit 8cc1c3d

Please sign in to comment.