diff --git a/.travis.yml b/.travis.yml
index 416a83018d..21ee5ffc76 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,17 +18,7 @@ jobs:
- stage: build
os: linux
dist: xenial
- go: 1.11.x
- env:
- - GO111MODULE=on
- script:
- - go run build/ci.go install
- - go run build/ci.go test -coverage $TEST_PACKAGES
-
- - stage: build
- os: linux
- dist: xenial
- go: 1.12.x
+ go: 1.13.x
env:
- GO111MODULE=on
script:
diff --git a/consensus/istanbul/backend/engine.go b/consensus/istanbul/backend/engine.go
index cad9faf406..a9019f67e2 100644
--- a/consensus/istanbul/backend/engine.go
+++ b/consensus/istanbul/backend/engine.go
@@ -134,6 +134,27 @@ func (sb *Backend) verifyHeader(chain consensus.ChainReader, header *types.Heade
return sb.verifyCascadingFields(chain, header, parents)
}
+// checkEpochBlockExists is a sanity check for lightest mode: it verifies that the correct epoch block exists for this header
+func (sb *Backend) checkEpochBlockExists(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
+ number := header.Number.Uint64()
+ // Check that latest epoch block is available
+ epoch := istanbul.GetEpochNumber(number, sb.config.Epoch)
+ epochBlockNumber := istanbul.GetEpochLastBlockNumber(epoch-1, sb.config.Epoch)
+ if number == epochBlockNumber {
+ epochBlockNumber = istanbul.GetEpochLastBlockNumber(epoch-2, sb.config.Epoch)
+ }
+ for _, hdr := range parents {
+ if hdr.Number.Uint64() == epochBlockNumber {
+ return nil
+ }
+ }
+ parent := chain.GetHeaderByNumber(epochBlockNumber)
+ if parent == nil || parent.Number.Uint64() != epochBlockNumber {
+ return consensus.ErrUnknownAncestor
+ }
+ return nil
+}
+
// verifyCascadingFields verifies all the header fields that are not standalone,
// rather depend on a batch of previous headers. The caller may optionally pass
// in a batch of parents (ascending order) to avoid looking those up from the
@@ -163,6 +184,8 @@ func (sb *Backend) verifyCascadingFields(chain consensus.ChainReader, header *ty
if err := sb.verifySigner(chain, header, parents); err != nil {
return err
}
+ } else if err := sb.checkEpochBlockExists(chain, header, parents); err != nil {
+ return err
}
return sb.verifyAggregatedSeals(chain, header, parents)
@@ -764,6 +787,7 @@ func (sb *Backend) snapshot(chain consensus.ChainReader, number uint64, hash com
if s, err := loadSnapshot(sb.config.Epoch, sb.db, blockHash); err == nil {
log.Trace("Loaded validator set snapshot from disk", "number", numberIter, "hash", blockHash)
snap = s
+ sb.recentSnapshots.Add(numberIter, snap)
break
}
}
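
Aside: the two-step lookup in `checkEpochBlockExists` (scan the optional `parents` batch first, only then hit the database) is the same pattern the other verification helpers in this file use to avoid redundant reads during batch verification. A minimal sketch of that lookup in isolation; `chainReader` and `headerKnown` are illustrative names, not part of the codebase:

```go
package sketch

import "github.com/ethereum/go-ethereum/core/types"

// chainReader is a stand-in for the slice of consensus.ChainReader used by
// checkEpochBlockExists.
type chainReader interface {
	GetHeaderByNumber(number uint64) *types.Header
}

// headerKnown reports whether the header at target is available either in
// the supplied batch of parents (ascending order) or in the chain itself;
// this is the two-step lookup checkEpochBlockExists performs before
// returning consensus.ErrUnknownAncestor.
func headerKnown(chain chainReader, parents []*types.Header, target uint64) bool {
	for _, hdr := range parents {
		if hdr.Number.Uint64() == target {
			return true
		}
	}
	hdr := chain.GetHeaderByNumber(target)
	return hdr != nil && hdr.Number.Uint64() == target
}
```
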
diff --git a/core/blockchain.go b/core/blockchain.go
index 1bab34863e..c4a7efe4fc 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -119,6 +119,14 @@ type CacheConfig struct {
TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
}
+// defaultCacheConfig are the default caching values if none are specified by the
+// user (also used during testing).
+var defaultCacheConfig = &CacheConfig{
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
+}
+
// BlockChain represents the canonical chain given a database with a genesis
// block. The Blockchain manages chain imports, reverts, chain reorganisations.
//
@@ -186,11 +194,7 @@ type BlockChain struct {
// Processor.
func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool) (*BlockChain, error) {
if cacheConfig == nil {
- cacheConfig = &CacheConfig{
- TrieCleanLimit: 256,
- TrieDirtyLimit: 256,
- TrieTimeLimit: 5 * time.Minute,
- }
+ cacheConfig = defaultCacheConfig
}
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
@@ -240,14 +244,18 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
if bc.empty() {
rawdb.InitDatabaseFromFreezer(bc.db)
}
-
if err := bc.loadLastState(); err != nil {
return nil, err
}
- // The first thing the node will do is reconstruct the verification data for
- // the head block. Might as well do it in advance.
- bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
-
+ // Make sure the state associated with the block is available
+ head := bc.CurrentBlock()
+ if _, err := state.New(head.Root(), bc.stateCache); err != nil {
+ log.Warn("Head state missing, repairing", "number", head.Number(), "hash", head.Hash())
+ if err := bc.SetHead(head.NumberU64()); err != nil {
+ return nil, err
+ }
+ }
+ // Ensure that a previous crash in SetHead doesn't leave extra ancients
if frozen, err := bc.db.Ancients(); err == nil && frozen > 0 {
var (
needRewind bool
@@ -257,7 +265,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
// blockchain repair. If the head full block is even lower than the ancient
// chain, truncate the ancient store.
fullBlock := bc.CurrentBlock()
- if fullBlock != nil && fullBlock != bc.genesisBlock && fullBlock.NumberU64() < frozen-1 {
+ if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.NumberU64() < frozen-1 {
needRewind = true
low = fullBlock.NumberU64()
}
@@ -272,15 +280,17 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
}
}
if needRewind {
- var hashes []common.Hash
- previous := bc.CurrentHeader().Number.Uint64()
- for i := low + 1; i <= bc.CurrentHeader().Number.Uint64(); i++ {
- hashes = append(hashes, rawdb.ReadCanonicalHash(bc.db, i))
+ log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low)
+ if err := bc.SetHead(low); err != nil {
+ return nil, err
}
- bc.Rollback(hashes, true)
- log.Warn("Truncate ancient chain", "from", previous, "to", low)
}
}
+ // The first thing the node will do is reconstruct the verification data for
+ // the head block (ethash cache or clique voting snapshot). Might as well do
+ // it in advance.
+ bc.engine.VerifyHeader(bc, bc.CurrentHeader(), true)
+
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash := range BadHashes {
if header := bc.GetHeaderByHash(hash); header != nil {
@@ -289,7 +299,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
// make sure the headerByNumber (if present) is in our current canonical chain
if headerByNumber != nil && headerByNumber.Hash() == header.Hash() {
log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
- bc.SetHead(header.Number.Uint64() - 1)
+ if err := bc.SetHead(header.Number.Uint64() - 1); err != nil {
+ return nil, err
+ }
log.Error("Chain rewind was successful, resuming normal operation")
}
}
@@ -344,15 +356,6 @@ func (bc *BlockChain) loadLastState() error {
log.Warn("Head block missing, resetting chain", "hash", head)
return bc.Reset()
}
- // Make sure the state associated with the block is available
- if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
- // Dangling block without a state associated, init from scratch
- log.Warn("Head state missing, repairing chain", "number", currentBlock.Number(), "hash", currentBlock.Hash())
- if err := bc.repair(&currentBlock); err != nil {
- return err
- }
- rawdb.WriteHeadBlockHash(bc.db, currentBlock.Hash())
- }
// Everything seems to be fine, set as the head block
bc.currentBlock.Store(currentBlock)
headBlockGauge.Update(int64(currentBlock.NumberU64()))
@@ -387,30 +390,48 @@ func (bc *BlockChain) loadLastState() error {
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(currentHeader.Time), 0)))
log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(currentBlock.Time()), 0)))
log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd, "age", common.PrettyAge(time.Unix(int64(currentFastBlock.Time()), 0)))
-
+ if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
+ log.Info("Loaded last fast-sync pivot marker", "number", *pivot)
+ }
return nil
}
-// SetHead rewinds the local chain to a new head. In the case of headers, everything
-// above the new head will be deleted and the new one set. In the case of blocks
-// though, the head may be further rewound if block bodies are missing (non-archive
-// nodes after a fast sync).
+// SetHead rewinds the local chain to a new head. Depending on whether the node
+// was fast synced or full synced and in which state, the method will try to
+// delete minimal data from disk whilst retaining chain consistency.
func (bc *BlockChain) SetHead(head uint64) error {
- log.Warn("Rewinding blockchain", "target", head)
-
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
- updateFn := func(db ethdb.KeyValueWriter, header *types.Header) {
- // Rewind the block chain, ensuring we don't end up with a stateless head block
- if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() < currentBlock.NumberU64() {
+ // Retrieve the last pivot block to short-circuit rollbacks beyond it, and
+ // the current freezer limit to start nuking in case it's underflown
+ pivot := rawdb.ReadLastPivotNumber(bc.db)
+ frozen, _ := bc.db.Ancients()
+
+ updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (uint64, bool) {
+ // Rewind the block chain, ensuring we don't end up with a stateless head
+ // block. Note, depth equality is permitted to allow using SetHead as a
+ // chain reparation mechanism without deleting any data!
+ if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.NumberU64() {
newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
if newHeadBlock == nil {
+ log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash())
newHeadBlock = bc.genesisBlock
} else {
- if _, err := state.New(newHeadBlock.Root(), bc.stateCache); err != nil {
- // Rewound state missing, rolled back to before pivot, reset to genesis
- newHeadBlock = bc.genesisBlock
+ // Block exists, keep rewinding until we find one with state
+ for {
+ if _, err := state.New(newHeadBlock.Root(), bc.stateCache); err != nil {
+ log.Info("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+ if pivot == nil || newHeadBlock.NumberU64() > *pivot {
+ newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1)
+ continue
+ } else {
+ log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot)
+ newHeadBlock = bc.genesisBlock
+ }
+ }
+ log.Info("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
+ break
}
}
rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
@@ -422,7 +443,6 @@ func (bc *BlockChain) SetHead(head uint64) error {
bc.currentBlock.Store(newHeadBlock)
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
}
-
// Rewind the fast block in a simpleton way to the target head
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && header.Number.Uint64() < currentFastBlock.NumberU64() {
newHeadFastBlock := bc.GetBlock(header.Hash(), header.Number.Uint64())
@@ -438,9 +458,19 @@ func (bc *BlockChain) SetHead(head uint64) error {
// to low, so it's safe to update the in-memory markers directly.
bc.currentFastBlock.Store(newHeadFastBlock)
headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
+ log.Info("Rewound fast block", "number", newHeadFastBlock.NumberU64())
}
- }
+ head := bc.CurrentBlock().NumberU64()
+ // If SetHead underflowed the freezer threshold and the block processing
+ // intent afterwards is full block importing, delete the chain segment
+ // between the stateful-block and the sethead target.
+ var wipe bool
+ if head+1 < frozen {
+ wipe = pivot == nil || head >= *pivot
+ }
+ return head, wipe // Only force wipe if full synced
+ }
// Rewind the header chain, deleting all block bodies until then
delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
// Ignore the error here since light client won't hit this path
@@ -448,10 +478,9 @@ func (bc *BlockChain) SetHead(head uint64) error {
if num+1 <= frozen {
// Truncate all relative data(header, total difficulty, body, receipt
// and canonical hash) from ancient store.
- if err := bc.db.TruncateAncients(num + 1); err != nil {
+ if err := bc.db.TruncateAncients(num); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err)
}
-
// Remove the hash <-> number mapping from the active store.
rawdb.DeleteHeaderNumber(db, hash)
} else {
@@ -463,8 +492,18 @@ func (bc *BlockChain) SetHead(head uint64) error {
}
// Todo(rjl493456442) txlookup, bloombits, etc
}
- bc.hc.SetHead(head, updateFn, delFn)
-
+ // If SetHead was only called as a chain reparation method, try to skip
+ // touching the header chain altogether, unless the freezer is broken
+ if block := bc.CurrentBlock(); block.NumberU64() == head {
+ if target, force := updateFn(bc.db, block.Header()); force {
+ bc.hc.SetHead(target, updateFn, delFn)
+ }
+ } else {
+ // Rewind the chain to the requested head and keep going backwards until a
+ // block with a state is found or fast sync pivot is passed
+ log.Warn("Rewinding blockchain", "target", head)
+ bc.hc.SetHead(head, updateFn, delFn)
+ }
// Clear out any stale content from the caches
bc.bodyCache.Purge()
bc.bodyRLPCache.Purge()
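
Condensed, the new `updateFn` applies two rules: walk the head back until a block with available state is found, short-circuiting to genesis once the walk would cross the fast-sync pivot; and force-wipe frozen data only when the node will be importing full blocks afterwards. A standalone sketch of those two decisions, with `hasState` and `parent` standing in for `state.New` and `bc.GetBlock` (names and factoring are illustrative, not the actual code):

```go
package sketch

// rewindTarget walks back from target until a block whose state is present,
// falling back to genesis (0) if the walk would cross the fast-sync pivot or
// hits a gap in the chain, mirroring the rule updateFn applies above.
func rewindTarget(target uint64, pivot *uint64, hasState func(uint64) bool,
	parent func(uint64) (uint64, bool)) uint64 {
	head := target
	for !hasState(head) {
		if pivot != nil && head <= *pivot {
			return 0 // rewind passed the pivot: aim at genesis
		}
		prev, ok := parent(head)
		if !ok {
			return 0 // gap in the chain: aim at genesis
		}
		head = prev
	}
	return head
}

// wipeFrozen mirrors the wipe decision: only force-delete the frozen chain
// segment when the rewound head underflows the freezer threshold AND the
// node is past (or never had) a fast-sync pivot, i.e. it is full syncing.
func wipeFrozen(head, frozen uint64, pivot *uint64) bool {
	if head+1 >= frozen {
		return false
	}
	return pivot == nil || head >= *pivot
}
```
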
@@ -569,36 +608,6 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
return nil
}
-// repair tries to repair the current blockchain by rolling back the current block
-// until one with associated state is found. This is needed to fix incomplete db
-// writes caused either by crashes/power outages, or simply non-committed tries.
-//
-// This method only rolls back the current block. The current header and current
-// fast block are left intact.
-func (bc *BlockChain) repair(head **types.Block) error {
- batch := bc.db.NewBatch()
-
- for {
- // Abort if we've rewound to a head block that does have associated state
- if _, err := state.New((*head).Root(), bc.stateCache); err == nil {
- log.Info("Rewound blockchain to past state", "number", (*head).Number(), "hash", (*head).Hash())
- if err := batch.Write(); err != nil {
- log.Error("Error when removing sidechain", "err", err)
- }
- bc.purge()
- return nil
- }
- // Otherwise rewind one block and recheck state availability there
- block := bc.GetBlock((*head).ParentHash(), (*head).NumberU64()-1)
- if block == nil {
- return fmt.Errorf("missing block %d [%x]", (*head).NumberU64()-1, (*head).ParentHash())
- }
- // It's safest to remove these blocks
- rawdb.DeleteBlock(batch, (*head).Hash(), (*head).NumberU64())
- *head = block
- }
-}
-
// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
@@ -893,52 +902,6 @@ const (
SideStatTy
)
-// Rollback is designed to remove a chain of links from the database that aren't
-// certain enough to be valid.
-func (bc *BlockChain) Rollback(chain []common.Hash, fullHeaderChainAvailable bool) {
- bc.chainmu.Lock()
- defer bc.chainmu.Unlock()
-
- batch := bc.db.NewBatch()
- for i := len(chain) - 1; i >= 0; i-- {
- hash := chain[i]
-
- // Degrade the chain markers if they are explicitly reverted.
- // In theory we should update all in-memory markers in the
- // last step, however the direction of rollback is from high
- // to low, so it's safe the update in-memory markers directly.
- currentHeader := bc.hc.CurrentHeader()
- if currentHeader.Hash() == hash {
- newHeadHeader := bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)
- rawdb.WriteHeadHeaderHash(batch, currentHeader.ParentHash)
- bc.hc.SetCurrentHeader(newHeadHeader)
- }
- if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
- newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
- rawdb.WriteHeadFastBlockHash(batch, currentFastBlock.ParentHash())
- bc.currentFastBlock.Store(newFastBlock)
- headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
- }
- if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
- newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
- rawdb.WriteHeadBlockHash(batch, currentBlock.ParentHash())
- bc.currentBlock.Store(newBlock)
- headBlockGauge.Update(int64(newBlock.NumberU64()))
- }
- }
- if err := batch.Write(); err != nil {
- log.Crit("Failed to rollback chain markers", "err", err)
- }
- // Truncate ancient data which exceeds the current header.
- //
- // Notably, it can happen that system crashes without truncating the ancient data
- // but the head indicator has been updated in the active store. Regarding this issue,
- // system will self recovery by truncating the extra data during the setup phase.
- if err := bc.truncateAncient(bc.hc.CurrentHeader().Number.Uint64()); err != nil {
- log.Crit("Truncate ancient store failed", "err", err)
- }
-}
-
// truncateAncient rewinds the blockchain to the specified header and deletes all
// data in the ancient store that exceeds the specified header.
func (bc *BlockChain) truncateAncient(head uint64) error {
@@ -2235,7 +2198,8 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int, co
_, err := bc.hc.WriteHeader(header)
return err
}
- return bc.hc.InsertHeaderChain(chain, whFunc, start)
+ res, err := bc.hc.InsertHeaderChain(chain, whFunc, start)
+ return res, err
}
// CurrentHeader retrieves the current head header of the canonical chain. The
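
One subtlety in the `delFn` hunk above is the ancient/active split, including the `TruncateAncients(num + 1)` to `TruncateAncients(num)` off-by-one fix: data at or below the freezer threshold can only be removed by truncating the freezer's tail, while newer data is deleted from leveldb key by key. A hedged sketch of that branch, assuming the 1.9-era `ethdb` interfaces where `TruncateAncients(n)` keeps the first n items (the real `delFn` also clears the hash-to-number mapping and other indices):

```go
package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
)

// deleteBlockData sketches delFn's branch: blocks already migrated into the
// append-only freezer are removed by truncation, everything else by explicit
// key deletes.
func deleteBlockData(db ethdb.Database, hash common.Hash, num, frozen uint64) {
	if num+1 <= frozen {
		// Keep the first num items, i.e. blocks 0..num-1; with the
		// off-by-one fixed, the block at num itself is dropped as well.
		if err := db.TruncateAncients(num); err != nil {
			log.Crit("Failed to truncate ancient data", "number", num, "err", err)
		}
	} else {
		rawdb.DeleteBlock(db, hash, num)
	}
}
```
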
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
new file mode 100644
index 0000000000..7450546a8f
--- /dev/null
+++ b/core/blockchain_repair_test.go
@@ -0,0 +1,1272 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Tests that abnormal program termination (i.e. crash) and restart doesn't leave
+// the database in some strange state with gaps in the chain, nor with block data
+// dangling in the future.
+
+package core
+
+import (
+ // "fmt"
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ mockEngine "github.com/ethereum/go-ethereum/consensus/consensustest"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+// Tests a recovery for a short canonical chain where a recent block was already
+// committed to disk and then the process crashed. In this case we expect the full
+// chain to be rolled back to the committed block, but the chain data itself left
+// in the database for replaying.
+func TestShortRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain where the fast sync pivot point was
+// already committed, after which the process crashed. In this case we expect the full
+// chain to be rolled back to the committed block, but the chain data itself left in
+// the database for replaying.
+func TestShortFastSyncedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain where the fast sync pivot point was
+// not yet committed, but the process crashed. In this case we expect the chain to
+// detect that it was fast syncing and not delete anything, since we can just pick
+// up directly where we left off.
+func TestShortFastSyncingRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then the process crashed. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the canonical chain to be rolled back to the committed block, but the chain data
+// itself left in the database for replaying.
+func TestShortOldForkedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then the process
+// crashed. In this test scenario the side chain is below the committed block. In
+// this case we expect the canonical chain to be rolled back to the committed block,
+// but the chain data itself left in the database for replaying.
+func TestShortOldForkedFastSyncedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but the process crashed. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the chain to detect that it was fast syncing and not delete anything, since we
+// can just pick up directly where we left off.
+func TestShortOldForkedFastSyncingRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then the process crashed. In this
+// test scenario the side chain reaches above the committed block. In this case we
+// expect the canonical chain to be rolled back to the committed block, but the
+// chain data itself left in the database for replaying.
+func TestShortNewlyForkedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 6,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 6,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then the process
+// crashed. In this test scenario the side chain reaches above the committed block.
+// In this case we expect the canonical chain to be rolled back to the committed
+// block, but the chain data itself left in the database for replaying.
+func TestShortNewlyForkedFastSyncedRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 6,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 6,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but the process crashed. In
+// this test scenario the side chain reaches above the committed block. In this
+// case we expect the chain to detect that it was fast syncing and not delete
+// anything, since we can just pick up directly where we left off.
+func TestShortNewlyForkedFastSyncingRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ // └->S1->S2->S3->S4->S5->S6
+ //
+ // Expected head header : C8
+ // Expected head fast block: C8
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 6,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 8,
+ expSidechainBlocks: 6,
+ expFrozen: 0,
+ expHeadHeader: 8,
+ expHeadFastBlock: 8,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where a recent
+// block - newer than the ancient limit - was already committed to disk and then
+// the process crashed. In this case we expect the chain to be rolled back to the
+// committed block, with everything afterwards kept as fast sync data.
+func TestLongShallowRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+ //
+ // Expected head header : C18
+ // Expected head fast block: C18
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 18,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 18,
+ expHeadFastBlock: 18,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where a recent
+// block - older than the ancient limit - was already committed to disk and then
+// the process crashed. In this case we expect the chain to be rolled back to the
+// committed block, with everything afterwards deleted.
+func TestLongDeepRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was already committed, after
+// which the process crashed. In this case we expect the chain to be rolled back
+// to the committed block, with everything afterwards kept as fast sync data.
+func TestLongFastSyncedShallowRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+ //
+ // Expected head header : C18
+ // Expected head fast block: C18
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 18,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 18,
+ expHeadFastBlock: 18,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was already committed, after
+// which the process crashed. In this case we expect the chain to be rolled back
+// to the committed block, with everything afterwards deleted.
+func TestLongFastSyncedDeepRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was not yet committed, but the
+// process crashed. In this case we expect the chain to detect that it was fast
+// syncing and not delete anything, since we can just pick up directly where we
+// left off.
+func TestLongFastSyncingShallowRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+ //
+ // Expected head header : C18
+ // Expected head fast block: C18
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 18,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 18,
+ expHeadFastBlock: 18,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was not yet committed, but the
+// process crashed. In this case we expect the chain to detect that it was fast
+// syncing and not delete anything, since we can just pick up directly where we
+// left off.
+func TestLongFastSyncingDeepRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected in leveldb:
+ // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+ //
+ // Expected head header : C24
+ // Expected head fast block: C24
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 24,
+ expSidechainBlocks: 0,
+ expFrozen: 9,
+ expHeadHeader: 24,
+ expHeadFastBlock: 24,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - newer than the ancient limit - was already
+// committed to disk and then the process crashed. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to be
+// rolled back to the committed block, with everything afterwards kept as fast
+// sync data; the side chain completely nuked by the freezer.
+func TestLongOldForkedShallowRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+ //
+ // Expected head header : C18
+ // Expected head fast block: C18
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 18,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 18,
+ expHeadFastBlock: 18,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - older than the ancient limit - was already
+// committed to disk and then the process crashed. In this test scenario the side
+// chain is below the committed block. In this case we expect the canonical chain
+// to be rolled back to the committed block, with everything afterwards deleted;
+// the side chain completely nuked by the freezer.
+func TestLongOldForkedDeepRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then the process crashed. In this test scenario
+// the side chain is below the committed block. In this case we expect the chain
+// to be rolled back to the committed block, with everything afterwards kept as
+// fast sync data; the side chain completely nuked by the freezer.
+func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+ //
+ // Expected head header : C18
+ // Expected head fast block: C18
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 18,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 18,
+ expHeadFastBlock: 18,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then the process crashed. In this test scenario
+// the side chain is below the committed block. In this case we expect the canonical
+// chain to be rolled back to the committed block, with everything afterwards deleted;
+// the side chain completely nuked by the freezer.
+func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but the process crashed. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and not delete anything. The side chain is completely
+// nuked by the freezer.
+func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+ //
+ // Expected head header : C18
+ // Expected head fast block: C18
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 18,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 18,
+ expHeadFastBlock: 18,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but the process crashed. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and not delete anything. The side chain is completely
+// nuked by the freezer.
+func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected in leveldb:
+ // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+ //
+ // Expected head header : C24
+ // Expected head fast block: C24
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 24,
+ expSidechainBlocks: 0,
+ expFrozen: 9,
+ expHeadHeader: 24,
+ expHeadFastBlock: 24,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - newer than the ancient limit - was already
+// committed to disk and then the process crashed. In this test scenario the side
+// chain is above the committed block. In this case we expect the chain to be
+// rolled back to the committed block, with everything afterwards kept as fast
+// sync data; the side chain completely nuked by the freezer.
+func TestLongNewerForkedShallowRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+ //
+ // Expected head header : C18
+ // Expected head fast block: C18
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 18,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 18,
+ expHeadFastBlock: 18,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - older than the ancient limit - was already
+// committed to disk and then the process crashed. In this test scenario the side
+// chain is above the committed block. In this case we expect the canonical chain
+// to be rolled back to the committed block, with everything afterwards deleted;
+// the side chain completely nuked by the freezer.
+func TestLongNewerForkedDeepRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then the process crashed. In this test scenario
+// the side chain is above the committed block. In this case we expect the chain
+// to be rolled back to the committed block, with everything afterwards kept as fast
+// sync data; the side chain completely nuked by the freezer.
+func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+ //
+ // Expected head header : C18
+ // Expected head fast block: C18
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 18,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 18,
+ expHeadFastBlock: 18,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then the process crashed. In this test scenario
+// the side chain is above the committed block. In this case we expect the canonical
+// chain to be rolled back to the committed block, with everything afterwards deleted;
+// the side chain completely nuked by the freezer.
+func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but the process crashed. In this test scenario the side
+// chain is above the committed block. In this case we expect the chain to detect
+// that it was fast syncing and not delete anything. The side chain is completely
+// nuked by the freezer.
+func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18
+ //
+ // Expected head header : C18
+ // Expected head fast block: C18
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 18,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 18,
+ expHeadFastBlock: 18,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a recovery for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but the process crashed. In this test scenario the side
+// chain is above the committed block. In this case we expect the chain to detect
+// that it was fast syncing and not delete anything. The side chain is completely
+// nuked by the freezer.
+func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // CRASH
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Expected in leveldb:
+ // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24
+ //
+ // Expected head header : C24
+ // Expected head fast block: C24
+ // Expected head block : G
+ testRepair(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ expCanonicalBlocks: 24,
+ expSidechainBlocks: 0,
+ expFrozen: 9,
+ expHeadHeader: 24,
+ expHeadFastBlock: 24,
+ expHeadBlock: 0,
+ })
+}
+
+func testRepair(t *testing.T, tt *rewindTest) {
+ // The test case is hard to follow; uncomment to visualize the input
+ //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ //fmt.Println(tt.Dump(true))
+
+ // Create a temporary persistent database
+ datadir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("Failed to create temporary datadir: %v", err)
+ }
+ os.RemoveAll(datadir)
+
+ db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+ if err != nil {
+ t.Fatalf("Failed to create persistent database: %v", err)
+ }
+ defer db.Close() // Might double close, should be fine
+
+ // Initialize a fresh chain
+ var (
+ genesis = new(Genesis).MustCommit(db)
+ engine = mockEngine.NewFaker()
+ )
+ chain, err := NewBlockChain(db, nil, params.IstanbulTestChainConfig, engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to create chain: %v", err)
+ }
+ // If sidechain blocks are needed, make a light chain and import it
+ var sideblocks types.Blocks
+ if tt.sidechainBlocks > 0 {
+ sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
+ b.SetCoinbase(common.Address{0x01})
+ })
+ if _, err := chain.InsertChain(sideblocks); err != nil {
+ t.Fatalf("Failed to import side chain: %v", err)
+ }
+ }
+ canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
+ b.SetCoinbase(common.Address{0x02})
+ // b.SetDifficulty(big.NewInt(1000000))
+ })
+ if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
+ t.Fatalf("Failed to import canonical chain start: %v", err)
+ }
+ if tt.commitBlock > 0 {
+ chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true)
+ }
+ if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
+ t.Fatalf("Failed to import canonical chain tail: %v", err)
+ }
+ // Force run a freeze cycle
+ type freezer interface {
+ Freeze(threshold uint64)
+ Ancients() (uint64, error)
+ }
+ db.(freezer).Freeze(tt.freezeThreshold)
+
+ // Set the simulated pivot block
+ if tt.pivotBlock != nil {
+ rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
+ }
+ // Pull the plug on the database, simulating a hard crash
+ db.Close()
+
+ // Start a new blockchain back up and see where the repair leads us
+ db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+ if err != nil {
+ t.Fatalf("Failed to reopen persistent database: %v", err)
+ }
+ defer db.Close()
+ chain, err = NewBlockChain(db, nil, params.IstanbulTestChainConfig, engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to recreate chain: %v", err)
+ }
+ defer chain.Stop()
+
+ // Iterate over all the remaining blocks and ensure there are no gaps
+ verifyNoGaps(t, chain, true, canonblocks)
+ verifyNoGaps(t, chain, false, sideblocks)
+ verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
+ verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
+
+ if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+ t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
+ }
+ if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+ t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
+ }
+ if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+ t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
+ }
+ if frozen, err := db.(freezer).Ancients(); err != nil {
+ t.Errorf("Failed to retrieve ancient count: %v\n", err)
+ } else if int(frozen) != tt.expFrozen {
+ t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
+ }
+}
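
Note: the test cases above build pivot pointers with a `uint64ptr` helper that does not appear in this excerpt; presumably it is the usual one-liner defined alongside the `rewindTest` harness below (only its location is an assumption here, the shape is forced by the `*uint64` field):

```go
// uint64ptr returns a pointer to n, for populating rewindTest.pivotBlock.
func uint64ptr(n uint64) *uint64 {
	return &n
}
```
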
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
new file mode 100644
index 0000000000..9b7065897e
--- /dev/null
+++ b/core/blockchain_sethead_test.go
@@ -0,0 +1,1546 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Tests that setting the chain head backwards doesn't leave the database in some
+// strange state with gaps in the chain, nor with block data dangling in the future.
+
+package core
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ mockEngine "github.com/ethereum/go-ethereum/consensus/consensustest"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+// rewindTest is a test case for chain rollback upon user request.
+type rewindTest struct {
+ canonicalBlocks int // Number of blocks to generate for the canonical chain (heavier)
+ sidechainBlocks int // Number of blocks to generate for the side chain (lighter)
+ freezeThreshold uint64 // Block number until which to move things into the freezer
+ commitBlock uint64 // Block number for which to commit the state to disk
+ pivotBlock *uint64 // Pivot block number in case of fast sync
+
+ setheadBlock uint64 // Block number to set head back to
+ expCanonicalBlocks int // Number of canonical blocks expected to remain in the database (excl. genesis)
+ expSidechainBlocks int // Number of sidechain blocks expected to remain in the database (excl. genesis)
+ expFrozen int // Number of canonical blocks expected to be in the freezer (incl. genesis)
+ expHeadHeader uint64 // Block number of the expected head header
+ expHeadFastBlock uint64 // Block number of the expected head fast sync block
+ expHeadBlock uint64 // Block number of the expected head full block
+}
+
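+// Dump renders the scenario as an ASCII diagram using the same layout as the
+// per-test comments below (chain shape, frozen/commit/pivot markers and the
+// expected post-rewind state), which is handy when debugging a failing case.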
+func (tt *rewindTest) Dump(crash bool) string {
+ buffer := new(strings.Builder)
+
+ fmt.Fprint(buffer, "Chain:\n G")
+ for i := 0; i < tt.canonicalBlocks; i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprint(buffer, " (HEAD)\n")
+ if tt.sidechainBlocks > 0 {
+ fmt.Fprintf(buffer, " └")
+ for i := 0; i < tt.sidechainBlocks; i++ {
+ fmt.Fprintf(buffer, "->S%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n")
+ }
+ fmt.Fprintf(buffer, "\n")
+
+ if tt.canonicalBlocks > int(tt.freezeThreshold) {
+ fmt.Fprint(buffer, "Frozen:\n G")
+ for i := 0; i < tt.canonicalBlocks-int(tt.freezeThreshold); i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n\n")
+ } else {
+ fmt.Fprintf(buffer, "Frozen: none\n")
+ }
+ fmt.Fprintf(buffer, "Commit: G")
+ if tt.commitBlock > 0 {
+ fmt.Fprintf(buffer, ", C%d", tt.commitBlock)
+ }
+ fmt.Fprint(buffer, "\n")
+
+ if tt.pivotBlock == nil {
+ fmt.Fprintf(buffer, "Pivot : none\n")
+ } else {
+ fmt.Fprintf(buffer, "Pivot : C%d\n", *tt.pivotBlock)
+ }
+ if crash {
+ fmt.Fprintf(buffer, "\nCRASH\n\n")
+ } else {
+ fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setheadBlock)
+ }
+ fmt.Fprintf(buffer, "------------------------------\n\n")
+
+ if tt.expFrozen > 0 {
+ fmt.Fprint(buffer, "Expected in freezer:\n G")
+ for i := 0; i < tt.expFrozen-1; i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n\n")
+ }
+ if tt.expFrozen > 0 {
+ if tt.expFrozen >= tt.expCanonicalBlocks {
+ fmt.Fprintf(buffer, "Expected in leveldb: none\n")
+ } else {
+ fmt.Fprintf(buffer, "Expected in leveldb:\n C%d)", tt.expFrozen-1)
+ for i := tt.expFrozen - 1; i < tt.expCanonicalBlocks; i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprint(buffer, "\n")
+ if tt.expSidechainBlocks > tt.expFrozen {
+ fmt.Fprintf(buffer, " └")
+ for i := tt.expFrozen - 1; i < tt.expSidechainBlocks; i++ {
+ fmt.Fprintf(buffer, "->S%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n")
+ }
+ }
+ } else {
+ fmt.Fprint(buffer, "Expected in leveldb:\n G")
+ for i := tt.expFrozen; i < tt.expCanonicalBlocks; i++ {
+ fmt.Fprintf(buffer, "->C%d", i+1)
+ }
+ fmt.Fprint(buffer, "\n")
+ if tt.expSidechainBlocks > tt.expFrozen {
+ fmt.Fprintf(buffer, " └")
+ for i := tt.expFrozen; i < tt.expSidechainBlocks; i++ {
+ fmt.Fprintf(buffer, "->S%d", i+1)
+ }
+ fmt.Fprintf(buffer, "\n")
+ }
+ }
+ fmt.Fprintf(buffer, "\n")
+ fmt.Fprintf(buffer, "Expected head header : C%d\n", tt.expHeadHeader)
+ fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock)
+ if tt.expHeadBlock == 0 {
+ fmt.Fprintf(buffer, "Expected head block : G\n")
+ } else {
+ fmt.Fprintf(buffer, "Expected head block : C%d\n", tt.expHeadBlock)
+ }
+ return buffer.String()
+}
+
+// Tests a sethead for a short canonical chain where a recent block was already
+// committed to disk and then the sethead called. In this case we expect the full
+// chain to be rolled back to the committed block. Everything above the sethead
+// point should be deleted. In between the committed block and the requested head
+// the data can remain as "fast sync" data to avoid redownloading it.
+func TestShortSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain where the fast sync pivot point was
+// already committed, after which sethead was called. In this case we expect the
+// chain to behave like in full sync mode, rolling back to the committed block
+// Everything above the sethead point should be deleted. In between the committed
+// block and the requested head the data can remain as "fast sync" data to avoid
+// redownloading it.
+func TestShortFastSyncedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain where the fast sync pivot point was
+// not yet committed, but sethead was called. In this case we expect the chain to
+// detect that it was fast syncing and delete everything from the new head, since
+// we can just pick up fast syncing from there. The head full block should be set
+// to the genesis.
+func TestShortFastSyncingSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 0,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then sethead was called. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the canonical full chain to be rolled back to the committed block. Everything
+// above the sethead point should be deleted. In between the committed block and
+// the requested head the data can remain as "fast sync" data to avoid redownloading
+// it. The side chain should be left alone as it was shorter.
+func TestShortOldForkedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then sethead was
+// called. In this test scenario the side chain is below the committed block. In
+// this case we expect the canonical full chain to be rolled back to the committed
+// block. Everything above the sethead point should be deleted. In between the
+// committed block and the requested head the data can remain as "fast sync" data
+// to avoid redownloading it. The side chain should be left alone as it was shorter.
+func TestShortOldForkedFastSyncedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but sethead was called. In this
+// test scenario the side chain is below the committed block. In this case we expect
+// the chain to detect that it was fast syncing and delete everything from the new
+// head, since we can just pick up fast syncing from there. The head full block
+// should be set to the genesis.
+func TestShortOldForkedFastSyncingSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 8,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 3,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where a
+// recent block was already committed to disk and then sethead was called. In this
+// test scenario the side chain reaches above the committed block. In this case we
+// expect the canonical full chain to be rolled back to the committed block. All
+// data above the sethead point should be deleted. In between the committed block
+// and the requested head the data can remain as "fast sync" data to avoid having
+// to redownload it. The side chain should be truncated to the head set.
+//
+// The side chain could be left as is if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3->S4->S5->S6->S7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 10,
+ sidechainBlocks: 8,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 7,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was already committed to disk and then sethead was
+// called. In this test scenario the side chain reaches above the committed block.
+// In this case we expect the canonical full chain to be rolled back to the
+// committed block. All data above the sethead point should be deleted. In
+// between the committed block and the requested head the data can remain as
+// "fast sync" data to avoid having to redownload it. The side chain should be
+// truncated to the head set.
+//
+// The side chain could be left as is if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8
+ //
+ // Frozen: none
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3->S4->S5->S6->S7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 10,
+ sidechainBlocks: 8,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 7,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a short canonical chain and a shorter side chain, where
+// the fast sync pivot point was not yet committed, but sethead was called. In
+// this test scenario the side chain reaches above the committed block. In this
+// case we expect the chain to detect that it was fast syncing and delete
+// everything from the new head, since we can just pick up fast syncing from
+// there.
+//
+// The side chain could be left as is if the fork point was before the new head
+// we are deleting to, but it would be exceedingly hard to detect that case and
+// properly handle it, so we'll trade extra work in exchange for simpler code.
+func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8
+ //
+ // Frozen: none
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(7)
+ //
+ // ------------------------------
+ //
+ // Expected in leveldb:
+ // G->C1->C2->C3->C4->C5->C6->C7
+ // └->S1->S2->S3->S4->S5->S6->S7
+ //
+ // Expected head header : C7
+ // Expected head fast block: C7
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 10,
+ sidechainBlocks: 8,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 7,
+ expCanonicalBlocks: 7,
+ expSidechainBlocks: 7,
+ expFrozen: 0,
+ expHeadHeader: 7,
+ expHeadFastBlock: 7,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where a recent
+// block - newer than the ancient limit - was already committed to disk and then
+// sethead was called. In this case we expect the full chain to be rolled back
+// to the committed block. Everything above the sethead point should be deleted.
+// In between the committed block and the requested head the data can remain as
+// "fast sync" data to avoid redownloading it.
+func TestLongShallowSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where a recent
+// block - older than the ancient limit - was already committed to disk and then
+// sethead was called. In this case we expect the full chain to be rolled back
+// to the committed block. Since the ancient limit was underflown, everything
+// needs to be deleted onwards to avoid creating a gap.
+func TestLongDeepSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was already committed, after
+// which sethead was called. In this case we expect the full chain to be rolled
+// back to the committed block. Everything above the sethead point should be
+// deleted. In between the committed block and the requested head the data can
+// remain as "fast sync" data to avoid redownloading it.
+func TestLongFastSyncedShallowSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was already committed, after
+// which sethead was called. In this case we expect the full chain to be rolled
+// back to the committed block. Since the ancient limit was underflown, everything
+// needs to be deleted onwards to avoid creating a gap.
+func TestLongFastSyncedDeepSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - newer than the ancient limit - was not yet committed, but
+// sethead was called. In this case we expect the chain to detect that it was fast
+// syncing and delete everything from the new head, since we can just pick up fast
+// syncing from there.
+func TestLongFastSyncingShallowSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks where the fast
+// sync pivot point - older than the ancient limit - was not yet committed, but
+// sethead was called. In this case we expect the chain to detect that it was fast
+// syncing and delete everything from the new head, since we can just pick up fast
+// syncing from there.
+func TestLongFastSyncingDeepSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4->C5->C6
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 0,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 7,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
+// chain, where a recent block - newer than the ancient limit - was already committed
+// to disk and then sethead was called. In this case we expect the canonical full
+// chain to be rolled back to the committed block. Everything above the sethead point
+// should be deleted. In between the committed block and the requested head the data
+// can remain as "fast sync" data to avoid redownloading it. The side chain is nuked
+// by the freezer.
+func TestLongOldForkedShallowSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter side
+// chain, where a recent block - older than the ancient limit - was already committed
+// to disk and then sethead was called. In this case we expect the canonical full
+// chain to be rolled back to the committed block. Since the ancient limit was
+// underflown, everything needs to be deleted onwards to avoid creating a gap. The
+// side chain is nuked by the freezer.
+func TestLongOldForkedDeepSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is below the committed block. In this case we expect the canonical
+// full chain to be rolled back to the committed block. Everything above the
+// sethead point should be deleted. In between the committed block and the
+// requested head the data can remain as "fast sync" data to avoid redownloading
+// it. The side chain is nuked by the freezer.
+func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is below the committed block. In this case we expect the canonical
+// full chain to be rolled back to the committed block. Since the ancient limit was
+// underflown, everything needs to be deleted onwards to avoid creating a gap. The
+// side chain is nuked by the freezer.
+func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and delete everything from the new head, since we can
+// just pick up fast syncing from there. The side chain is completely nuked by the
+// freezer.
+func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is below the committed block. In this case we expect the chain to detect
+// that it was fast syncing and delete everything from the new head, since we can
+// just pick up fast syncing from there. The side chain is completely nuked by the
+// freezer.
+func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4->C5->C6
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 3,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 7,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - newer than the ancient limit - was already
+// committed to disk and then sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongShallowSetHead.
+func TestLongNewerForkedShallowSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where a recent block - older than the ancient limit - was already
+// committed to disk and then sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongDeepSetHead.
+func TestLongNewerForkedDeepSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : none
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: nil,
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is above the committed block. In this case the freezer will delete
+// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead.
+func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was already committed to disk and then sethead was called. In this test scenario
+// the side chain is above the committed block. In this case the freezer will delete
+// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead.
+func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G, C4
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C4
+ // Expected head fast block: C4
+ // Expected head block : C4
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 4,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 4,
+ expSidechainBlocks: 0,
+ expFrozen: 5,
+ expHeadHeader: 4,
+ expHeadFastBlock: 4,
+ expHeadBlock: 4,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - newer than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongFastSyncingShallowSetHead.
+func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2
+ //
+ // Expected in leveldb:
+ // C2)->C3->C4->C5->C6
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 18,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 3,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
+// Tests a sethead for a long canonical chain with frozen blocks and a shorter
+// side chain, where the fast sync pivot point - older than the ancient limit -
+// was not yet committed, but sethead was called. In this test scenario the side
+// chain is above the committed block. In this case the freezer will delete the
+// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead.
+func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) {
+ // Chain:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD)
+ // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12
+ //
+ // Frozen:
+ // G->C1->C2->C3->C4->C5->C6->C7->C8
+ //
+ // Commit: G
+ // Pivot : C4
+ //
+ // SetHead(6)
+ //
+ // ------------------------------
+ //
+ // Expected in freezer:
+ // G->C1->C2->C3->C4->C5->C6
+ //
+ // Expected in leveldb: none
+ //
+ // Expected head header : C6
+ // Expected head fast block: C6
+ // Expected head block : G
+ testSetHead(t, &rewindTest{
+ canonicalBlocks: 24,
+ sidechainBlocks: 12,
+ freezeThreshold: 16,
+ commitBlock: 0,
+ pivotBlock: uint64ptr(4),
+ setheadBlock: 6,
+ expCanonicalBlocks: 6,
+ expSidechainBlocks: 0,
+ expFrozen: 7,
+ expHeadHeader: 6,
+ expHeadFastBlock: 6,
+ expHeadBlock: 0,
+ })
+}
+
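+// testSetHead builds the scenario described by tt (canonical and side chains,
+// freezer contents, committed state and optional pivot), rewinds the chain via
+// SetHead and verifies what remains in the database against the expectations.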
+func testSetHead(t *testing.T, tt *rewindTest) {
+ // The test cases are hard to follow; uncomment these to visualize the input
+ //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
+ //fmt.Println(tt.Dump(false))
+
+ // Create a temporary persistent database
+ datadir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("Failed to create temporary datadir: %v", err)
+ }
+ os.RemoveAll(datadir)
+
+ db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "")
+ if err != nil {
+ t.Fatalf("Failed to create persistent database: %v", err)
+ }
+ defer db.Close()
+
+ // Initialize a fresh chain
+ var (
+ genesis = new(Genesis).MustCommit(db)
+ engine = mockEngine.NewFaker()
+ )
+ chain, err := NewBlockChain(db, nil, params.IstanbulTestChainConfig, engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("Failed to create chain: %v", err)
+ }
+ // If sidechain blocks are needed, make a light chain and import it
+ var sideblocks types.Blocks
+ if tt.sidechainBlocks > 0 {
+ sideblocks, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) {
+ b.SetCoinbase(common.Address{0x01})
+ })
+ if _, err := chain.InsertChain(sideblocks); err != nil {
+ t.Fatalf("Failed to import side chain: %v", err)
+ }
+ }
+ canonblocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) {
+ b.SetCoinbase(common.Address{0x02})
+ // b.SetDifficulty(big.NewInt(1000000))
+ })
+ if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil {
+ t.Fatalf("Failed to import canonical chain start: %v", err)
+ }
+ if tt.commitBlock > 0 {
+ chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true)
+ }
+ if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil {
+ t.Fatalf("Failed to import canonical chain tail: %v", err)
+ }
+ // Manually dereference anything not committed to not have to work with 128+ tries
+ for _, block := range sideblocks {
+ chain.stateCache.TrieDB().Dereference(block.Root())
+ }
+ for _, block := range canonblocks {
+ chain.stateCache.TrieDB().Dereference(block.Root())
+ }
+ // Force run a freeze cycle
+ type freezer interface {
+ Freeze(threshold uint64)
+ Ancients() (uint64, error)
+ }
+ db.(freezer).Freeze(tt.freezeThreshold)
+
+ // Set the simulated pivot block
+ if tt.pivotBlock != nil {
+ rawdb.WriteLastPivotNumber(db, *tt.pivotBlock)
+ }
+ // Set the head of the chain back to the requested number
+ chain.SetHead(tt.setheadBlock)
+
+ // Iterate over all the remaining blocks and ensure there are no gaps
+ verifyNoGaps(t, chain, true, canonblocks)
+ verifyNoGaps(t, chain, false, sideblocks)
+ verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
+ verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
+
+ if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+ t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
+ }
+ if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+ t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
+ }
+ if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+ t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
+ }
+ if frozen, err := db.(freezer).Ancients(); err != nil {
+ t.Errorf("Failed to retrieve ancient count: %v\n", err)
+ } else if int(frozen) != tt.expFrozen {
+ t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
+ }
+}
+
+// verifyNoGaps checks that there are no gaps after the initial set of blocks in
+// the database, raising a test error if any are found.
+func verifyNoGaps(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks) {
+ t.Helper()
+
+ var end uint64
+ for i := uint64(0); i <= uint64(len(inserted)); i++ {
+ header := chain.GetHeaderByNumber(i)
+ // fmt.Printf("header %v\n", header);
+ if header == nil && end == 0 {
+ end = i
+ }
+ if header != nil && end > 0 {
+ if canonical {
+ t.Errorf("Canonical header gap between #%d-#%d", end, i-1)
+ } else {
+ t.Errorf("Sidechain header gap between #%d-#%d", end, i-1)
+ }
+ end = 0 // Reset for further gap detection
+ }
+ }
+ end = 0
+ for i := uint64(0); i <= uint64(len(inserted)); i++ {
+ block := chain.GetBlockByNumber(i)
+ // fmt.Printf("block %v\n", block);
+ if block == nil && end == 0 {
+ end = i
+ }
+ if block != nil && end > 0 {
+ if canonical {
+ t.Errorf("Canonical block gap between #%d-#%d", end, i-1)
+ } else {
+ t.Errorf("Sidechain block gap between #%d-#%d", end, i-1)
+ }
+ end = 0 // Reset for further gap detection
+ }
+ }
+ end = 0
+ for i := uint64(1); i <= uint64(len(inserted)); i++ {
+ receipts := chain.GetReceiptsByHash(inserted[i-1].Hash())
+ // fmt.Printf("receipt %v\n", receipts);
+ if receipts == nil && end == 0 {
+ end = i
+ }
+ if receipts != nil && end > 0 {
+ if canonical {
+ t.Errorf("Canonical receipt gap between #%d-#%d", end, i-1)
+ } else {
+ t.Errorf("Sidechain receipt gap between #%d-#%d", end, i-1)
+ }
+ end = 0 // Reset for further gap detection
+ }
+ }
+}
+
+// verifyCutoff checks that no chain data is available in the chain after the
+// specified limit, but that it is available before it.
+func verifyCutoff(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks, head int) {
+ t.Helper()
+
+ for i := 1; i <= len(inserted); i++ {
+ if i <= head {
+ if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header == nil {
+ if canonical {
+ t.Errorf("Canonical header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block == nil {
+ if canonical {
+ t.Errorf("Canonical block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts == nil {
+ if canonical {
+ t.Errorf("Canonical receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ } else {
+ if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header != nil {
+ if canonical {
+ t.Errorf("Canonical header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block != nil {
+ if canonical {
+ t.Errorf("Canonical block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts != nil {
+ if canonical {
+ t.Errorf("Canonical receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ } else {
+ t.Errorf("Sidechain receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head)
+ }
+ }
+ }
+ }
+}
+
+// uint64ptr is a weird helper to allow 1-line constant pointer creation.
+func uint64ptr(n uint64) *uint64 {
+ return &n
+}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 7fc8898eb8..8f561fdc8c 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -811,12 +811,12 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
return db, func() { os.RemoveAll(dir) }
}
// Configure a subchain to roll back
- remove := []common.Hash{}
- for _, block := range blocks[height/2:] {
- remove = append(remove, block.Hash())
- }
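+ // remove is the number of the first block to drop; the SetHead(remove-1)
+ // calls below rewind each chain to just before it.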
+ remove := blocks[height/2].NumberU64()
+
// Create a small assertion method to check the three heads
assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
+ t.Helper()
+
if num := chain.CurrentBlock().NumberU64(); num != block {
t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
}
@@ -830,14 +830,18 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as an archive node and ensure all pointers are updated
archiveDb, delfn := makeDb()
defer delfn()
- archive, _ := NewBlockChain(archiveDb, nil, gspec.Config, mockEngine.NewFaker(), vm.Config{}, nil)
+
+ archiveCaching := *defaultCacheConfig
+ archiveCaching.TrieDirtyDisabled = true
+
+ archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec.Config, mockEngine.NewFaker(), vm.Config{}, nil)
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
defer archive.Stop()
assert(t, "archive", archive, height, height, height)
- archive.Rollback(remove, true)
+ archive.SetHead(remove - 1)
assert(t, "archive", archive, height/2, height/2, height/2)
// Import the chain as a non-archive node and ensure all pointers are updated
@@ -857,7 +861,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
assert(t, "fast", fast, height, height, 0)
- fast.Rollback(remove, true)
+ fast.SetHead(remove - 1)
assert(t, "fast", fast, height/2, height/2, 0)
// Import the chain as a ancient-first node and ensure all pointers are updated
@@ -873,12 +877,12 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
assert(t, "ancient", ancient, height, height, 0)
- ancient.Rollback(remove, true)
- assert(t, "ancient", ancient, height/2, height/2, 0)
- if frozen, err := ancientDb.Ancients(); err != nil || frozen != height/2+1 {
- t.Fatalf("failed to truncate ancient store, want %v, have %v", height/2+1, frozen)
- }
+ ancient.SetHead(remove - 1)
+ assert(t, "ancient", ancient, 0, 0, 0)
+ if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 {
+ t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen)
+ }
// Import the chain as a light node and ensure all pointers are updated
lightDb, delfn := makeDb()
defer delfn()
@@ -889,7 +893,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
defer light.Stop()
assert(t, "light", light, height, 0, 0)
- light.Rollback(remove, true)
+ light.SetHead(remove - 1)
assert(t, "light", light, height/2, 0, 0)
}
@@ -1724,6 +1728,7 @@ func TestBlockchainRecovery(t *testing.T) {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.Remove(frdir)
+
ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "")
if err != nil {
t.Fatalf("failed to create temp freezer db: %v", err)
@@ -1741,6 +1746,7 @@ func TestBlockchainRecovery(t *testing.T) {
if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
t.Fatalf("failed to insert receipt %d: %v", n, err)
}
+ rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior
ancient.Stop()
// Destroy head fast block manually
@@ -2050,11 +2056,9 @@ func testInsertKnownChainData(t *testing.T, typ string) {
asserter(t, blocks[len(blocks)-1])
// Import a long canonical chain with some known data as prefix.
- var rollback []common.Hash
- for i := len(blocks) / 2; i < len(blocks); i++ {
- rollback = append(rollback, blocks[i].Hash())
- }
- chain.Rollback(rollback, true)
+ rollback := blocks[len(blocks)/2].NumberU64()
+
+ chain.SetHead(rollback - 1)
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
@@ -2074,11 +2078,7 @@ func testInsertKnownChainData(t *testing.T, typ string) {
asserter(t, blocks3[len(blocks3)-1])
// Rollback the heavier chain and re-insert the longer chain again
- for i := 0; i < len(blocks3); i++ {
- rollback = append(rollback, blocks3[i].Hash())
- }
- chain.Rollback(rollback, true)
-
+ chain.SetHead(rollback - 1)
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
t.Fatalf("failed to insert chain data: %v", err)
}
diff --git a/core/headerchain.go b/core/headerchain.go
index fb61a8329e..3531c7e674 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -156,9 +156,8 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
if ptd == nil {
if hc.config.FullHeaderChainAvailable {
return NonStatTy, consensus.ErrUnknownAncestor
- } else {
- localTd = big.NewInt(hc.CurrentHeader().Number.Int64() + 1)
}
+ localTd = big.NewInt(hc.CurrentHeader().Number.Int64() + 1)
} else {
localTd = hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
}
@@ -322,7 +321,8 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
if hc.HasHeader(hash, header.Number.Uint64()) {
externTd := hc.GetTd(hash, header.Number.Uint64())
localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
- if externTd == nil || externTd.Cmp(localTd) <= 0 {
+ // If it has no difficulty, it wasn't stored properly
+ if externTd != nil && externTd.Cmp(localTd) <= 0 {
stats.ignored++
continue
}
@@ -506,8 +506,10 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
type (
// UpdateHeadBlocksCallback is a callback function that is called by SetHead
- // before head header is updated.
- UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header)
+ // before head header is updated. The method returns the block the head was
+ // actually updated to (in case of missing state) and a flag whether SetHead
+ // should keep rewinding to that block forcefully (in case the ancient limits
+ // were exceeded).
+ UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool)
// DeleteBlockContentCallback is a callback function that is called by SetHead
// before each header is deleted.
@@ -520,26 +522,38 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
var (
parentHash common.Hash
batch = hc.chainDb.NewBatch()
+ origin = true
)
for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
- hash, num := hdr.Hash(), hdr.Number.Uint64()
+ num := hdr.Number.Uint64()
// Rewind block chain to new head.
+ var nums []uint64
parent := hc.GetHeader(hdr.ParentHash, num-1)
if parent == nil {
+ if !hc.config.FullHeaderChainAvailable {
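+ // Lightest sync does not keep the full header chain, so the only
+ // ancestors that may still exist are epoch blocks; queue every
+ // epoch block number below the head for deletion too.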
+ for i := hc.config.Istanbul.Epoch; i < num; i += hc.config.Istanbul.Epoch {
+ nums = append(nums, i)
+ }
+ }
parent = hc.genesisHeader
}
parentHash = hdr.ParentHash
+
// Notably, since geth has the possibility for setting the head to a low
// height which is even lower than ancient head.
// In order to ensure that the head is always no higher than the data in
- // the database(ancient store or active store), we need to update head
+ // the database (ancient store or active store), we need to update head
// first then remove the relative data from the database.
//
// Update head first(head fast block, head full block) before deleting the data.
markerBatch := hc.chainDb.NewBatch()
if updateFn != nil {
- updateFn(markerBatch, parent)
+ newHead, force := updateFn(markerBatch, parent)
+ if force && newHead < head {
+ log.Warn("Force rewinding till ancient limit", "head", newHead)
+ head = newHead
+ }
}
// Update head header then.
rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
@@ -550,14 +564,33 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
hc.currentHeaderHash = parentHash
headHeaderGauge.Update(parent.Number.Int64())
- // Remove the relative data from the database.
- if delFn != nil {
- delFn(batch, hash, num)
+ // If this is the first iteration, wipe any leftover data upwards too so
+ // we don't end up with dangling gaps in the database
+ if origin {
+ for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
+ nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
+ }
+ origin = false
+ }
+ nums = append(nums, num)
+
+ // Remove the related data from the database on all sidechains
+ for _, num := range nums {
+ // Gather all the side fork hashes
+ hashes := rawdb.ReadAllHashes(hc.chainDb, num)
+ if len(hashes) == 0 {
+ // No hashes in the database whatsoever, probably frozen already
+ hashes = append(hashes, hdr.Hash())
+ }
+ for _, hash := range hashes {
+ if delFn != nil {
+ delFn(batch, hash, num)
+ }
+ rawdb.DeleteHeader(batch, hash, num)
+ rawdb.DeleteTd(batch, hash, num)
+ }
+ rawdb.DeleteCanonicalHash(batch, num)
}
- // Rewind header chain to new head.
- rawdb.DeleteHeader(batch, hash, num)
- rawdb.DeleteTd(batch, hash, num)
- rawdb.DeleteCanonicalHash(batch, num)
}
// Flush all accumulated deletions.
if err := batch.Write(); err != nil {
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 28a4e1cc82..d4feb2fb5c 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -154,6 +154,32 @@ func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
}
}
+// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
+// did a full sync, the last pivot will always be nil.
+func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
+ data, _ := db.Get(lastPivotKey)
+ if len(data) == 0 {
+ return nil
+ }
+ var pivot uint64
+ if err := rlp.DecodeBytes(data, &pivot); err != nil {
+ log.Error("Invalid pivot block number in database", "err", err)
+ return nil
+ }
+ return &pivot
+}
+
+// WriteLastPivotNumber stores the number of the last pivot block.
+func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
+ enc, err := rlp.EncodeToBytes(pivot)
+ if err != nil {
+ log.Crit("Failed to encode pivot block number", "err", err)
+ }
+ if err := db.Put(lastPivotKey, enc); err != nil {
+ log.Crit("Failed to store pivot block number", "err", err)
+ }
+}
+
// ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow
// reporting correct numbers across restarts.
func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
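
A minimal sketch of the read/write round trip the pivot accessors above implement, with an in-memory map standing in for the key-value store; only the rlp calls are real API, the rest is illustrative:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	store := map[string][]byte{}
	key := "LastPivot"

	// Write: RLP-encode the pivot number and store it under the fixed key.
	enc, err := rlp.EncodeToBytes(uint64(123456))
	if err != nil {
		panic(err)
	}
	store[key] = enc

	// Read: a missing or undecodable entry yields nil, mirroring
	// ReadLastPivotNumber's behaviour.
	var pivot uint64
	if data := store[key]; len(data) > 0 {
		if err := rlp.DecodeBytes(data, &pivot); err == nil {
			fmt.Println("pivot:", pivot) // pivot: 123456
		}
	}
}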
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index aea5bd51b0..fcc83bb349 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -21,6 +21,7 @@ import (
"errors"
"fmt"
"os"
+ "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -53,6 +54,22 @@ func (frdb *freezerdb) Close() error {
return nil
}
+// Freeze is a helper method used for external testing to trigger and block until
+// a freeze cycle completes, without having to sleep for a minute to trigger the
+// automatic background run.
+func (frdb *freezerdb) Freeze(threshold uint64) {
+ // Set the freezer threshold to a temporary value
+ defer func(old uint64) {
+ atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, old)
+ }(atomic.LoadUint64(&frdb.AncientStore.(*freezer).threshold))
+ atomic.StoreUint64(&frdb.AncientStore.(*freezer).threshold, threshold)
+
+ // Trigger a freeze cycle and block until it's done
+ trigger := make(chan struct{}, 1)
+ frdb.AncientStore.(*freezer).trigger <- trigger
+ <-trigger
+}
+
// nofreezedb is a database wrapper that disables freezer data retrievals.
type nofreezedb struct {
ethdb.KeyValueStore
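
A minimal sketch of the save/swap/restore idiom Freeze uses for the threshold, isolated from the freezer; names and values are illustrative assumptions:

package main

import (
	"fmt"
	"sync/atomic"
)

var threshold uint64 = 90000

// withThreshold atomically swaps a temporary threshold in, runs fn, and
// restores the previous value on return, mirroring Freeze's defer pattern.
func withThreshold(tmp uint64, fn func()) {
	defer func(old uint64) {
		atomic.StoreUint64(&threshold, old)
	}(atomic.LoadUint64(&threshold))

	atomic.StoreUint64(&threshold, tmp)
	fn()
}

func main() {
	withThreshold(0, func() {
		fmt.Println("during:", atomic.LoadUint64(&threshold)) // during: 0
	})
	fmt.Println("after:", atomic.LoadUint64(&threshold)) // after: 90000
}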
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 5497c59d49..e9acee061e 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -69,10 +69,15 @@ type freezer struct {
// WARNING: The `frozen` field is accessed atomically. On 32 bit platforms, only
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
- frozen uint64 // Number of blocks already frozen
+ frozen uint64 // Number of blocks already frozen
+ threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
tables map[string]*freezerTable // Data tables for storing everything
instanceLock fileutil.Releaser // File-system lock to prevent double opens
+
+ trigger chan chan struct{} // Manual blocking freeze trigger, for test determinism
+
+ quit chan struct{} // Quit channel to signal the freeze loop to shut down
}
// newFreezer creates a chain freezer that moves ancient chain data into
@@ -99,8 +104,11 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
}
// Open all the supported data tables
freezer := &freezer{
+ threshold: params.FullImmutabilityThreshold,
tables: make(map[string]*freezerTable),
instanceLock: lock,
+ trigger: make(chan chan struct{}),
+ quit: make(chan struct{}),
}
for name, disableSnappy := range freezerNoSnappy {
table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, disableSnappy)
@@ -254,39 +262,66 @@ func (f *freezer) Sync() error {
func (f *freezer) freeze(db ethdb.KeyValueStore) {
nfdb := &nofreezedb{KeyValueStore: db}
+ var (
+ backoff bool
+ triggered chan struct{} // Used in tests
+ )
for {
+ select {
+ case <-f.quit:
+ log.Info("Freezer shutting down")
+ return
+ default:
+ }
+ if backoff {
+ // If we were doing a manual trigger, notify it
+ if triggered != nil {
+ triggered <- struct{}{}
+ triggered = nil
+ }
+ select {
+ case <-time.NewTimer(freezerRecheckInterval).C:
+ backoff = false
+ case triggered = <-f.trigger:
+ backoff = false
+ case <-f.quit:
+ return
+ }
+ }
// Retrieve the freezing threshold.
hash := ReadHeadBlockHash(nfdb)
if hash == (common.Hash{}) {
log.Debug("Current full block hash unavailable") // new chain, empty database
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
}
number := ReadHeaderNumber(nfdb, hash)
+ threshold := atomic.LoadUint64(&f.threshold)
+
switch {
case number == nil:
log.Error("Current full block number unavailable", "hash", hash)
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
- case *number < params.ImmutabilityThreshold:
- log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", params.ImmutabilityThreshold)
- time.Sleep(freezerRecheckInterval)
+ case *number < threshold:
+ log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold)
+ backoff = true
continue
- case *number-params.ImmutabilityThreshold <= f.frozen:
+ case *number-threshold <= f.frozen:
log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen)
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
}
head := ReadHeader(nfdb, hash, *number)
if head == nil {
log.Error("Current full block unavailable", "number", *number, "hash", hash)
- time.Sleep(freezerRecheckInterval)
+ backoff = true
continue
}
// Seems we have data ready to be frozen, process in usable batches
- limit := *number - params.ImmutabilityThreshold
+ limit := *number - threshold
if limit-f.frozen > freezerBatchLimit {
limit = f.frozen + freezerBatchLimit
}
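
A minimal sketch of the backoff/trigger loop introduced above, reduced to a toy worker; the channel shapes mirror the patch but the worker body is an illustrative stand-in:

package main

import (
	"fmt"
	"time"
)

func worker(trigger chan chan struct{}, quit chan struct{}) {
	backoff := false
	var triggered chan struct{}
	for {
		if backoff {
			// Acknowledge a pending manual trigger before waiting again.
			if triggered != nil {
				triggered <- struct{}{}
				triggered = nil
			}
			select {
			case <-time.After(10 * time.Millisecond): // periodic recheck
			case triggered = <-trigger: // manual test trigger
			case <-quit:
				return
			}
			backoff = false
		}
		// ... do one unit of work; when idle, back off instead of sleeping ...
		backoff = true
	}
}

func main() {
	trigger := make(chan chan struct{})
	quit := make(chan struct{})
	go worker(trigger, quit)

	// Manually trigger one cycle and block until it completes.
	done := make(chan struct{}, 1)
	trigger <- done
	<-done
	close(quit)
	fmt.Println("cycle completed")
}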
@@ -295,7 +330,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
first = f.frozen
ancients = make([]common.Hash, 0, limit)
)
- for f.frozen < limit {
+ for f.frozen <= limit {
// Retrieves all the components of the canonical block
hash := ReadCanonicalHash(nfdb, f.frozen)
if hash == (common.Hash{}) {
@@ -346,11 +381,15 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
log.Crit("Failed to delete frozen canonical blocks", "err", err)
}
batch.Reset()
- // Wipe out side chain also.
+
+ // Wipe out side chains also and track dangling side chains
+ var dangling []common.Hash
for number := first; number < f.frozen; number++ {
// Always keep the genesis block in active database
if number != 0 {
- for _, hash := range ReadAllHashes(db, number) {
+ dangling = ReadAllHashes(db, number)
+ for _, hash := range dangling {
+ log.Trace("Deleting side chain", "number", number, "hash", hash)
DeleteBlock(batch, hash, number)
}
}
@@ -358,6 +397,41 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
if err := batch.Write(); err != nil {
log.Crit("Failed to delete frozen side blocks", "err", err)
}
+ batch.Reset()
+
+ // Step into the future and delete any dangling side chains
+ if f.frozen > 0 {
+ tip := f.frozen
+ for len(dangling) > 0 {
+ drop := make(map[common.Hash]struct{})
+ for _, hash := range dangling {
+ log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash)
+ drop[hash] = struct{}{}
+ }
+ children := ReadAllHashes(db, tip)
+ for i := 0; i < len(children); i++ {
+ // Dig up the child and ensure it's dangling
+ child := ReadHeader(nfdb, children[i], tip)
+ if child == nil {
+ log.Error("Missing dangling header", "number", tip, "hash", children[i])
+ continue
+ }
+ if _, ok := drop[child.ParentHash]; !ok {
+ children = append(children[:i], children[i+1:]...)
+ i--
+ continue
+ }
+ // Delete all block data associated with the child
+ log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash)
+ DeleteBlock(batch, children[i], tip)
+ }
+ dangling = children
+ tip++
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to delete dangling side blocks", "err", err)
+ }
+ }
// Log something friendly for the user
context := []interface{}{
"blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1,
@@ -369,7 +443,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore) {
// Avoid database thrashing with tiny writes
if f.frozen-first < freezerBatchLimit {
- time.Sleep(freezerRecheckInterval)
+ backoff = true
}
}
}
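
A minimal sketch of the dangling-sidechain walk above, with header lookups stubbed by plain maps; hashes are illustrative strings rather than common.Hash values:

package main

import "fmt"

// pruneDangling returns, per height, the hashes that become unreachable once
// the given set is removed. parentOf maps child hash -> parent hash and
// byNumber maps height -> hashes stored at that height.
func pruneDangling(dangling []string, tip int, parentOf map[string]string, byNumber map[int][]string) map[int][]string {
	deleted := map[int][]string{}
	for len(dangling) > 0 {
		drop := make(map[string]struct{})
		for _, h := range dangling {
			drop[h] = struct{}{}
		}
		var next []string
		for _, child := range byNumber[tip] {
			// Only children whose parent was just dropped are dangling.
			if _, ok := drop[parentOf[child]]; ok {
				next = append(next, child)
				deleted[tip] = append(deleted[tip], child)
			}
		}
		dangling = next
		tip++
	}
	return deleted
}

func main() {
	parentOf := map[string]string{"b1": "a1", "c1": "b1", "b2": "a2"}
	byNumber := map[int][]string{5: {"b1", "b2"}, 6: {"c1"}}
	// Freezing deleted sidechain block a1 at height 4; its descendants at 5
	// and 6 are now dangling, while b2 (child of the canonical a2) survives.
	fmt.Println(pruneDangling([]string{"a1"}, 5, parentOf, byNumber))
	// map[5:[b1] 6:[c1]]
}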
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index bba6c4115e..e4dcc2d9db 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -38,6 +38,9 @@ var (
// headFastBlockKey tracks the latest known incomplete block's hash during fast sync.
headFastBlockKey = []byte("LastFast")
+ // lastPivotKey tracks the last pivot block used by fast sync (to re-enable it on SetHead).
+ lastPivotKey = []byte("LastPivot")
+
// fastTrieProgressKey tracks the number of trie entries imported during fast sync.
fastTrieProgressKey = []byte("TrieSync")
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 4f2f603d3c..d31d5545a3 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -49,20 +49,21 @@ var (
MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
MaxStateFetch = 384 // Amount of node state values to allow fetching per request
- rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
- rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
- rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value
- ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
- ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
+ rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests
+ rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests
+ rttDefaultEstimate = 5 * time.Second // Default round-trip time to target for download requests
+ rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value
+ ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion
+ ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts
qosTuningPeers = 5 // Number of peers to tune based on (best peers)
qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence
qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value
- maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
- maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
- maxResultsProcess = 2048 // Number of content download results to import at once into the chain
- maxForkAncestry uint64 = params.ImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
+ maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
+ maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
+ maxResultsProcess = 2048 // Number of content download results to import at once into the chain
+ maxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
@@ -185,10 +186,9 @@ type LightChain interface {
// InsertHeaderChain inserts a batch of headers into the local chain.
InsertHeaderChain([]*types.Header, int, bool) (int, error)
- // Rollback removes a few recently added elements from the local chain.
- Rollback([]common.Hash, bool)
-
Config() *params.ChainConfig
+ // SetHead rewinds the local chain to a new head.
+ SetHead(uint64) error
}
// BlockChain encapsulates functions required to sync a (full or fast) blockchain.
@@ -248,7 +248,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
checkpoint: checkpoint,
queue: newQueue(),
peers: newPeerSet(),
- rttEstimate: uint64(rttMaxEstimate),
+ rttEstimate: uint64(rttDefaultEstimate),
rttConfidence: uint64(1000000),
blockchain: chain,
lightchain: lightchain,
@@ -351,13 +351,28 @@ func (d *Downloader) UnregisterPeer(id string) error {
// adding various sanity checks as well as wrapping it with various log entries.
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
err := d.synchronise(id, head, td, mode)
+
switch err {
- case nil:
- case errBusy, errCanceled:
+ case nil, errBusy, errCanceled:
+ return err
+ }
+
+ if errors.Is(err, errInvalidChain) {
+ log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
+ if d.dropPeer == nil {
+ // The dropPeer method is nil when `--copydb` is used for a local copy.
+ // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
+ log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
+ } else {
+ d.dropPeer(id)
+ }
+ return err
+ }
+ switch err {
case errTimeout, errBadPeer, errStallingPeer, errUnsyncedPeer,
errEmptyHeaderSet, errPeersUnavailable, errTooOld,
- errInvalidAncestor, errInvalidChain:
+ errInvalidAncestor:
log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
if d.dropPeer == nil {
// The dropPeer method is nil when `--copydb` is used for a local copy.
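
A minimal sketch of the Go 1.13 error-wrapping convention this diff switches to (fmt.Errorf with %w plus errors.Is); the sentinel text is assumed for illustration:

package main

import (
	"errors"
	"fmt"
)

var errInvalidChain = errors.New("retrieved hash chain is invalid")

func main() {
	// Wrapping with %w keeps the sentinel in the error chain...
	err := fmt.Errorf("%w: %v", errInvalidChain, "head headers broke chain ordering")
	fmt.Println(errors.Is(err, errInvalidChain)) // true

	// ...whereas %v flattens it to a string and breaks the match.
	flat := fmt.Errorf("%v: extra context", errInvalidChain)
	fmt.Println(errors.Is(flat, errInvalidChain)) // false
}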
@@ -486,6 +501,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
pivot := uint64(0)
if d.Mode == FastSync {
pivot = d.calcPivot(height)
+ rawdb.WriteLastPivotNumber(d.stateDB, pivot)
if pivot == 0 {
origin = 0
} else if pivot <= origin {
@@ -517,6 +533,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
d.ancientLimit = height - maxForkAncestry - 1
}
frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
+
// If a part of blockchain data has already been written into active store,
// disable the ancient style insertion explicitly.
if origin >= frozen && frozen != 0 {
@@ -527,11 +544,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I
}
// Rewind the ancient store and blockchain if reorg happens.
if origin+1 < frozen {
- var hashes []common.Hash
- for i := origin + 1; i < d.lightchain.CurrentHeader().Number.Uint64(); i++ {
- hashes = append(hashes, rawdb.ReadCanonicalHash(d.stateDB, i))
+ if err := d.lightchain.SetHead(origin + 1); err != nil {
+ return err
}
- d.lightchain.Rollback(hashes, d.Mode.SyncFullHeaderChain())
}
}
// Initiate the sync using a concurrent header and content retrieval algorithm
@@ -805,7 +820,7 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header)
expectNumber := from + int64(i)*int64(skip+1)
if number := header.Number.Int64(); number != expectNumber {
p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number, "localHeight", localHeight, "remoteHeight", remoteHeight)
- return 0, errInvalidChain
+ return 0, fmt.Errorf("%w: head headers broke chain ordering", errInvalidChain)
}
}
// Check if a common ancestor was found
@@ -1084,7 +1099,7 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64,
filled, proced, err := d.fillHeaderSkeleton(from, headers)
if err != nil {
p.log.Debug("Skeleton chain invalid", "err", err)
- return errInvalidChain
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
}
headers = filled[proced:]
from += uint64(proced)
@@ -1328,13 +1343,13 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
if peer := d.peers.Peer(packet.PeerId()); peer != nil {
// Deliver the received chunk of data and check chain validity
accepted, err := deliver(packet)
- if err == errInvalidChain {
+ if errors.Is(err, errInvalidChain) {
return err
}
// Unless a peer delivered something completely else than requested (usually
// caused by a timed out request which came through in the end), set it to
// idle. If the delivery's stale, the peer should have already been idled.
- if err != errStaleDelivery {
+ if !errors.Is(err, errStaleDelivery) {
setIdle(peer, accepted)
}
// Issue a log to the user to see what's going on
@@ -1480,38 +1495,40 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
// Keep a count of uncertain headers to roll back
- var rollback []*types.Header
+ var (
+ rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis)
+ rollbackErr error
+ mode = d.Mode
+ )
defer func() {
- if len(rollback) > 0 {
- // Flatten the headers and roll them back
- hashes := make([]common.Hash, len(rollback))
- for i, header := range rollback {
- hashes[i] = header.Hash()
- }
+ if rollback > 0 {
lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
if d.Mode.SyncFullBlockChain() {
lastFastBlock = d.blockchain.CurrentFastBlock().Number()
lastBlock = d.blockchain.CurrentBlock().Number()
}
- d.lightchain.Rollback(hashes, d.Mode.SyncFullHeaderChain())
+ if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
+ // We're already unwinding the stack, only print the error to make it more visible
+ log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
+ }
curFastBlock, curBlock := common.Big0, common.Big0
if d.Mode.SyncFullBlockChain() {
curFastBlock = d.blockchain.CurrentFastBlock().Number()
curBlock = d.blockchain.CurrentBlock().Number()
}
- log.Warn("Rolled back headers", "count", len(hashes),
+ log.Warn("Rolled back chain segment",
"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
- "block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
+ "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
}
}()
-
// Wait for batches of headers to process
gotHeaders := false
for {
select {
case <-d.cancelCh:
+ rollbackErr = errCanceled
return errCanceled
case headers := <-d.headerProcCh:
@@ -1539,6 +1556,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
if d.Mode.SyncFullBlockChain() {
head := d.blockchain.CurrentBlock()
if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
+ rollbackErr = errStallingPeer
return errStallingPeer
}
}
@@ -1553,11 +1571,12 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
if d.Mode == FastSync || d.Mode == LightSync {
head := d.lightchain.CurrentHeader()
if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
+ rollbackErr = errStallingPeer
return errStallingPeer
}
}
// Disable any rollback and return
- rollback = nil
+ rollback = 0
return nil
}
// Otherwise split the chunk of headers into batches and process them
@@ -1566,6 +1585,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
// Terminate if something failed in between processing chunks
select {
case <-d.cancelCh:
+ rollbackErr = errCanceled
return errCanceled
default:
}
@@ -1575,34 +1595,30 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
limit = len(headers)
}
chunk := headers[:limit]
+
// In case of header only syncing, validate the chunk immediately
- if d.Mode == FastSync || !d.Mode.SyncFullBlockChain() {
- // Collect the yet unknown headers to mark them as uncertain
- unknown := make([]*types.Header, 0, len(chunk))
- for _, header := range chunk {
- if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
- unknown = append(unknown, header)
- }
- }
+ if mode == FastSync || !mode.SyncFullBlockChain() {
// If we're importing pure headers, verify based on their recentness
frequency := fsHeaderCheckFrequency
if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
frequency = 1
}
if n, err := d.lightchain.InsertHeaderChain(chunk, frequency, d.Mode.SyncFullHeaderChain()); err != nil {
- // If some headers were inserted, add them too to the rollback list
- if n > 0 {
- rollback = append(rollback, chunk[:n]...)
+ rollbackErr = err
+
+ // If some headers were inserted, track them as uncertain
+ if n > 0 && rollback == 0 {
+ rollback = chunk[0].Number.Uint64()
}
log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
- return errInvalidChain
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
}
- // All verifications passed, store newly found uncertain headers
- log.Trace(fmt.Sprintf("Adding headers for potential rollback: %v", headersToNumbers(unknown)))
- rollback = append(rollback, unknown...)
- if len(rollback) > fsHeaderSafetyNet {
- log.Debug("Adding some headers for rollback")
- rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
+ // All verifications passed, track all headers within the allotted limits
+ head := chunk[len(chunk)-1].Number.Uint64()
+ if head-rollback > uint64(fsHeaderSafetyNet) {
+ rollback = head - uint64(fsHeaderSafetyNet)
+ } else {
+ rollback = 1
}
}
// Unless we're doing light chains, schedule the headers for associated content retrieval
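
A minimal sketch of the rollback clamping above, extracted into a standalone function; the constant name matches the one used in the patch, and its value of 2048 matches upstream but should be treated as illustrative:

package main

import "fmt"

const fsHeaderSafetyNet = 2048

// updateRollback mirrors the clamping logic: everything above the returned
// number is considered uncertain and would be unwound on a failure.
func updateRollback(rollback, head uint64) uint64 {
	if head-rollback > uint64(fsHeaderSafetyNet) {
		return head - uint64(fsHeaderSafetyNet)
	}
	return 1 // the whole chain fits in the window; genesis can't be unwound
}

func main() {
	fmt.Println(updateRollback(0, 10000)) // 7952: only the trailing window is tracked
	fmt.Println(updateRollback(0, 1000))  // 1: the whole chain fits in the window
}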
@@ -1611,6 +1627,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
select {
case <-d.cancelCh:
+ rollbackErr = errCanceled
return errCanceled
case <-time.After(time.Second):
}
@@ -1619,6 +1636,7 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
inserts := d.queue.Schedule(chunk, origin)
if len(inserts) != len(chunk) {
log.Debug("Stale headers")
+ rollbackErr = errBadPeer
return errBadPeer
}
}
@@ -1643,14 +1661,6 @@ func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) er
}
}
-func headersToNumbers(headers []*types.Header) []*big.Int {
- headerNumbers := make([]*big.Int, 0)
- for _, header := range headers {
- headerNumbers = append(headerNumbers, header.Number)
- }
- return headerNumbers
-}
-
// processFullSyncContent takes fetch results from the queue and imports them into the chain.
func (d *Downloader) processFullSyncContent() error {
for {
@@ -1697,7 +1707,7 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
// of the blocks delivered from the downloader, and the indexing will be off.
log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
}
- return errInvalidChain
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
}
return nil
}
@@ -1751,6 +1761,7 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
}
}
go closeOnErr(sync)
+
// Figure out the ideal pivot block. Note that this goalpost may move if the
// sync takes long enough for the chain head to move significantly.
pivot := d.calcPivot(latest.Number.Uint64())
@@ -1790,6 +1801,9 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error {
newPivot := d.calcPivot(height)
log.Warn("Pivot became stale, moving", "old", pivot, "new", newPivot)
pivot = newPivot
+ // Write out the pivot into the database so a rollback beyond it will
+ // reenable fast sync
+ rawdb.WriteLastPivotNumber(d.stateDB, pivot)
}
}
P, beforeP, afterP := splitAroundPivot(pivot, results)
@@ -1872,7 +1886,7 @@ func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *state
}
if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
- return errInvalidChain
+ return fmt.Errorf("%w: %v", errInvalidChain, err)
}
return nil
}
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index acecd20492..a772e01e63 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -154,7 +154,12 @@ func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {
func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {
dl.lock.RLock()
defer dl.lock.RUnlock()
+ return dl.getHeaderByHash(hash)
+}
+
+// getHeaderByHash returns the header if found either within ancients or own blocks.
+// This method assumes that the caller holds at least the read-lock (dl.lock)
+func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {
header := dl.ancientHeaders[hash]
if header != nil {
return header
@@ -242,6 +247,13 @@ func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
dl.lock.RLock()
defer dl.lock.RUnlock()
+ return dl.getTd(hash)
+}
+
+// getTd retrieves the block's total difficulty if found either within
+// ancients or own blocks.
+// This method assumes that the caller holds at least the read-lock (dl.lock)
+func (dl *downloadTester) getTd(hash common.Hash) *big.Int {
if td := dl.ancientChainTd[hash]; td != nil {
return td
}
@@ -252,27 +264,33 @@ func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {
func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int, contiguousHeaders bool) (i int, err error) {
dl.lock.Lock()
defer dl.lock.Unlock()
-
// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors
- if _, ok := dl.ownHeaders[headers[0].ParentHash]; !ok {
- return 0, errors.New("unknown parent")
+ if dl.getHeaderByHash(headers[0].ParentHash) == nil {
+ return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number)
}
+ var hashes []common.Hash
for i := 1; i < len(headers); i++ {
+ hash := headers[i-1].Hash()
if headers[i].ParentHash != headers[i-1].Hash() {
- return i, errors.New("unknown parent")
+ return i, fmt.Errorf("non-contiguous import at position %d", i)
}
+ hashes = append(hashes, hash)
}
+ hashes = append(hashes, headers[len(headers)-1].Hash())
// Do a full insert if pre-checks passed
for i, header := range headers {
- if _, ok := dl.ownHeaders[header.Hash()]; ok {
+ hash := hashes[i]
+ if dl.getHeaderByHash(hash) != nil {
continue
}
- if _, ok := dl.ownHeaders[header.ParentHash]; !ok {
- return i, errors.New("unknown parent")
+ if dl.getHeaderByHash(header.ParentHash) == nil {
+ // This _should_ be impossible, due to precheck and induction
+ return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i)
}
- dl.ownHashes = append(dl.ownHashes, header.Hash())
- dl.ownHeaders[header.Hash()] = header
- dl.ownChainTd[header.Hash()] = new(big.Int).Add(header.Number, big.NewInt(1))
+ dl.ownHashes = append(dl.ownHashes, hash)
+ dl.ownHeaders[hash] = header
+
+ dl.ownChainTd[hash] = new(big.Int).Add(header.Number, big.NewInt(1))
}
return len(headers), nil
}
@@ -281,12 +299,11 @@ func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq i
func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {
dl.lock.Lock()
defer dl.lock.Unlock()
-
for i, block := range blocks {
if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {
- return i, errors.New("unknown parent")
+ return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks))
} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {
- return i, fmt.Errorf("unknown parent state %x: %v", parent.Root(), err)
+ return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err)
}
if _, ok := dl.ownHeaders[block.Hash()]; !ok {
dl.ownHashes = append(dl.ownHashes, block.Hash())
@@ -311,7 +328,7 @@ func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []typ
}
if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {
if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {
- return i, errors.New("unknown parent")
+ return i, errors.New("InsertReceiptChain: unknown parent")
}
}
if blocks[i].NumberU64() <= ancientLimit {
@@ -332,25 +349,52 @@ func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []typ
return len(blocks), nil
}
-// Rollback removes some recently added elements from the chain.
-func (dl *downloadTester) Rollback(hashes []common.Hash, fullHeaderChainAvailable bool) {
+// SetHead rewinds the local chain to a new head.
+func (dl *downloadTester) SetHead(head uint64) error {
dl.lock.Lock()
defer dl.lock.Unlock()
- for i := len(hashes) - 1; i >= 0; i-- {
- if dl.ownHashes[len(dl.ownHashes)-1] == hashes[i] {
- dl.ownHashes = dl.ownHashes[:len(dl.ownHashes)-1]
+ // Find the hash of the head to reset to
+ var hash common.Hash
+ for h, header := range dl.ownHeaders {
+ if header.Number.Uint64() == head {
+ hash = h
+ }
+ }
+ for h, header := range dl.ancientHeaders {
+ if header.Number.Uint64() == head {
+ hash = h
}
- delete(dl.ownChainTd, hashes[i])
- delete(dl.ownHeaders, hashes[i])
- delete(dl.ownReceipts, hashes[i])
- delete(dl.ownBlocks, hashes[i])
+ }
+ if hash == (common.Hash{}) {
+ return fmt.Errorf("unknown head to set: %d", head)
+ }
+ // Find the offset in the header chain
+ var offset int
+ for o, h := range dl.ownHashes {
+ if h == hash {
+ offset = o
+ break
+ }
+ }
+ // Remove all the hashes and associated data afterwards
+ for i := offset + 1; i < len(dl.ownHashes); i++ {
+ delete(dl.ownChainTd, dl.ownHashes[i])
+ delete(dl.ownHeaders, dl.ownHashes[i])
+ delete(dl.ownReceipts, dl.ownHashes[i])
+ delete(dl.ownBlocks, dl.ownHashes[i])
- delete(dl.ancientChainTd, hashes[i])
- delete(dl.ancientHeaders, hashes[i])
- delete(dl.ancientReceipts, hashes[i])
- delete(dl.ancientBlocks, hashes[i])
+ delete(dl.ancientChainTd, dl.ownHashes[i])
+ delete(dl.ancientHeaders, dl.ownHashes[i])
+ delete(dl.ancientReceipts, dl.ownHashes[i])
+ delete(dl.ancientBlocks, dl.ownHashes[i])
}
+ dl.ownHashes = dl.ownHashes[:offset+1]
+ return nil
+}
+
+// Rollback removes some recently added elements from the chain.
+func (dl *downloadTester) Rollback(hashes []common.Hash) {
}
// newPeer registers a new block download source into the downloader.
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 2e5b205e74..6c7f0beaf1 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -510,7 +510,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common
index := int(header.Number.Int64() - int64(q.resultOffset))
if index >= len(q.resultCache) || index < 0 {
common.Report("index allocation went beyond available resultCache space")
- return nil, false, errInvalidChain
+ return nil, false, fmt.Errorf("%w: index allocation went beyond available resultCache space", errInvalidChain)
}
if q.resultCache[index] == nil {
components := 1
@@ -862,14 +862,16 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, taskQ
q.active.Signal()
}
// If none of the data was good, it's a stale delivery
- switch {
- case failure == nil || failure == errInvalidChain:
+ if failure == nil {
+ return accepted, nil
+ }
+ if errors.Is(failure, errInvalidChain) {
return accepted, failure
- case useful:
+ }
+ if useful {
return accepted, fmt.Errorf("partial failure: %v", failure)
- default:
- return accepted, errStaleDelivery
}
+ return accepted, fmt.Errorf("%w: %v", errStaleDelivery, failure)
}
// Prepare configures the result cache to allow accepting and caching inbound
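
A minimal sketch of the delivery classification rewritten above, as a standalone function; the sentinel values are illustrative. Note the %w position: wrapping errStaleDelivery is what lets fetchParts' errors.Is check recognise a stale delivery:

package main

import (
	"errors"
	"fmt"
)

var (
	errInvalidChain  = errors.New("invalid chain")
	errStaleDelivery = errors.New("stale delivery")
)

// classify mirrors deliver's tail: fatal chain errors propagate unchanged,
// partially useful deliveries report a partial failure, and everything else
// is a stale delivery that keeps its cause visible.
func classify(failure error, useful bool) error {
	if failure == nil {
		return nil
	}
	if errors.Is(failure, errInvalidChain) {
		return failure
	}
	if useful {
		return fmt.Errorf("partial failure: %v", failure)
	}
	return fmt.Errorf("%w: %v", errStaleDelivery, failure)
}

func main() {
	err := classify(errors.New("no task"), false)
	fmt.Println(errors.Is(err, errStaleDelivery)) // true
}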
diff --git a/eth/handler.go b/eth/handler.go
index ae98413bb5..b5e7885b9d 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -77,6 +77,7 @@ type ProtocolManager struct {
txpool txPool
blockchain *core.BlockChain
+ chaindb ethdb.Database
maxPeers int
downloader *downloader.Downloader
@@ -118,6 +119,7 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
eventMux: mux,
txpool: txpool,
blockchain: blockchain,
+ chaindb: chaindb,
peers: newPeerSet(),
whitelist: whitelist,
newPeerCh: make(chan *peer),
diff --git a/eth/sync.go b/eth/sync.go
index 9e180ee200..2bb14b29fd 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -22,6 +22,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/log"
@@ -170,22 +171,33 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
currentBlock := pm.blockchain.CurrentBlock()
td := pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
- pHead, pTd := peer.Head()
- if pTd.Cmp(td) <= 0 {
- return
- }
// Otherwise try to sync with the downloader
mode := downloader.FullSync
if atomic.LoadUint32(&pm.fastSync) == 1 {
// Fast sync was explicitly requested, and explicitly granted
mode = downloader.FastSync
+ } else if pivot := rawdb.ReadLastPivotNumber(pm.chaindb); pivot != nil {
+ if currentBlock.NumberU64() < *pivot {
+ block := pm.blockchain.CurrentFastBlock()
+ td = pm.blockchain.GetTdByHash(block.Hash())
+ mode = downloader.FastSync
+ }
+ }
+
+ pHead, pTd := peer.Head()
+ if pTd.Cmp(td) <= 0 {
+ return
}
+
if mode == downloader.FastSync {
// Make sure the peer's total difficulty we are synchronizing is higher.
- if pm.blockchain.GetTdByHash(pm.blockchain.CurrentFastBlock().Hash()).Cmp(pTd) >= 0 {
+ currentBlock := pm.blockchain.CurrentFastBlock().Hash()
+ td := pm.blockchain.GetTdByHash(currentBlock)
+ if td.Cmp(pTd) >= 0 {
return
}
}
+
// Run the sync cycle, and disable fast sync if we've gone past the pivot block
if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {
return
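
A minimal sketch of the sync-mode selection above: a recorded pivot forces fast sync until the full chain passes it. The types and values are illustrative, not the real downloader API:

package main

import "fmt"

type syncMode int

const (
	fullSync syncMode = iota
	fastSync
)

// selectMode mirrors the branch in synchronise: explicit fast sync wins; an
// interrupted fast sync leaves a pivot behind, and we stay in fast sync until
// the full block chain has advanced past it.
func selectMode(explicitFast bool, storedPivot *uint64, currentBlock uint64) syncMode {
	if explicitFast {
		return fastSync
	}
	if storedPivot != nil && currentBlock < *storedPivot {
		return fastSync
	}
	return fullSync
}

func main() {
	pivot := uint64(123456)
	fmt.Println(selectMode(false, &pivot, 100) == fastSync)    // true
	fmt.Println(selectMode(false, &pivot, 200000) == fullSync) // true
}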
diff --git a/les/client.go b/les/client.go
index 9c605fdaa3..067dfdc8c7 100644
--- a/les/client.go
+++ b/les/client.go
@@ -114,7 +114,6 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
engine: eth.CreateConsensusEngine(ctx, chainConfig, config, nil, false, chainDb),
networkId: config.NetworkId,
bloomRequests: make(chan chan *bloombits.Retrieval),
- bloomIndexer: eth.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations, fullChainAvailable),
serverPool: newServerPool(chainDb, config.UltraLightServers),
}
@@ -129,9 +128,10 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
leth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.retriever)
// If the full chain is not available then indexing each block header isn't possible.
if fullChainAvailable {
+ leth.bloomIndexer = eth.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations, fullChainAvailable)
leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequency, params.HelperTrieConfirmations, fullChainAvailable)
+ leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency, fullChainAvailable)
}
- leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency, fullChainAvailable)
leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)
checkpoint := config.Checkpoint
@@ -159,11 +159,13 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
leth.oracle = checkpointoracle.New(oracle, leth.localCheckpoint)
// Note: AddChildIndexer starts the update process for the child
- leth.bloomIndexer.AddChildIndexer(leth.bloomTrieIndexer)
+ if leth.bloomIndexer != nil && leth.bloomTrieIndexer != nil {
+ leth.bloomIndexer.AddChildIndexer(leth.bloomTrieIndexer)
+ leth.bloomIndexer.Start(leth.blockchain)
+ }
if leth.chtIndexer != nil {
leth.chtIndexer.Start(leth.blockchain)
}
- leth.bloomIndexer.Start(leth.blockchain)
// TODO mcortesi (needs etherbase & gatewayFee?)
leth.handler = newClientHandler(syncMode, config.UltraLightServers, config.UltraLightFraction, checkpoint, leth, config.GatewayFee)
@@ -306,7 +308,9 @@ func (s *LightEthereum) Stop() error {
s.reqDist.close()
s.odr.Stop()
s.relay.Stop()
- s.bloomIndexer.Close()
+ if s.bloomIndexer != nil {
+ s.bloomIndexer.Close()
+ }
if s.chtIndexer != nil {
s.chtIndexer.Close()
}
diff --git a/les/commons.go b/les/commons.go
index d93712e1a5..bb4b24bcac 100644
--- a/les/commons.go
+++ b/les/commons.go
@@ -115,13 +115,18 @@ func (c *lesCommons) nodeInfo() interface{} {
// section index and head hash as a local checkpoint package.
func (c *lesCommons) latestLocalCheckpoint() params.TrustedCheckpoint {
var sections uint64
+ var sections2 uint64
if c.chtIndexer == nil {
sections = 0
} else {
sections, _, _ = c.chtIndexer.Sections()
}
- sections2, _, _ := c.bloomTrieIndexer.Sections()
+ if c.bloomTrieIndexer == nil {
+ sections2 = 0
+ } else {
+ sections2, _, _ = c.bloomTrieIndexer.Sections()
+ }
// Cap the section index if the two sections are not consistent.
if sections > sections2 {
sections = sections2
diff --git a/light/lightchain.go b/light/lightchain.go
index 4bc311351b..969413ff9a 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -174,7 +174,6 @@ func (lc *LightChain) loadLastState() error {
func (lc *LightChain) SetHead(head uint64) error {
lc.chainmu.Lock()
defer lc.chainmu.Unlock()
-
lc.hc.SetHead(head, nil, nil)
return lc.loadLastState()
}
diff --git a/light/txpool.go b/light/txpool.go
index bbed500cf7..52dbfe291f 100644
--- a/light/txpool.go
+++ b/light/txpool.go
@@ -18,6 +18,7 @@ package light
import (
"context"
+ "errors"
"fmt"
"math/big"
"sync"
@@ -225,6 +226,10 @@ func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
func (pool *TxPool) reorgOnNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) {
txc := make(txStateChanges)
oldh := pool.chain.GetHeaderByHash(pool.head)
+ if oldh == nil {
+ pool.head = newHeader.Hash()
+ return nil, errors.New("old header lost")
+ }
newh := newHeader
// find common ancestor, create list of rolled back and new block hashes
var oldHashes, newHashes []common.Hash
@@ -310,7 +315,10 @@ func (pool *TxPool) setNewHead(head *types.Header) {
ctx, cancel := context.WithTimeout(context.Background(), blockCheckTimeout)
defer cancel()
- txc, _ := pool.reorgOnNewHead(ctx, head)
+ txc, err := pool.reorgOnNewHead(ctx, head)
+ if err != nil {
+ log.Warn("Cannot reorg", "err", err)
+ }
m, r := txc.getLists()
pool.relay.NewHead(pool.head, m, r)
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 5670e970cd..6cd7094267 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -305,9 +305,7 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens
}
w.skipSealHook = func(task *task) bool { return true }
w.fullTaskHook = func() {
- // Arch64 unit tests are running in a VM on travis, they must
- // be given more time to execute.
- time.Sleep(time.Second)
+ time.Sleep(100 * time.Millisecond)
}
w.start() // Start mining!
expectedTasksLen := 1
diff --git a/params/network_params.go b/params/network_params.go
index bba24721c1..68255b70c0 100644
--- a/params/network_params.go
+++ b/params/network_params.go
@@ -57,5 +57,5 @@ const (
// considered immutable (i.e. soft finality). It is used by the downloader as a
// hard limit against deep ancestors, by the blockchain against deep reorgs, by
// the freezer as the cutoff threshold and by clique as the snapshot trust limit.
- ImmutabilityThreshold = 90000
+ FullImmutabilityThreshold = 90000
)
diff --git a/trie/sync.go b/trie/sync.go
index e5a0c17493..f21a919c75 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -99,7 +99,7 @@ func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callb
if _, ok := s.membatch.batch[root]; ok {
return
}
- if s.bloom.Contains(root[:]) {
+ if s.bloom == nil || s.bloom.Contains(root[:]) {
// Bloom filter says this might be a duplicate, double check
blob, _ := s.database.Get(root[:])
if local, err := decodeNode(root[:], blob); local != nil && err == nil {
@@ -138,7 +138,7 @@ func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
if _, ok := s.membatch.batch[hash]; ok {
return
}
- if s.bloom.Contains(hash[:]) {
+ if s.bloom == nil || s.bloom.Contains(hash[:]) {
// Bloom filter says this might be a duplicate, double check
if ok, _ := s.database.Has(hash[:]); ok {
return
@@ -300,7 +300,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
if _, ok := s.membatch.batch[hash]; ok {
continue
}
- if s.bloom.Contains(node) {
+ if s.bloom == nil || s.bloom.Contains(node) {
// Bloom filter says this might be a duplicate, double check
if ok, _ := s.database.Has(node); ok {
continue
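
A minimal sketch of the nil-filter guard applied in the three hunks above; the bloom interface and database map are illustrative stand-ins:

package main

import "fmt"

type bloom interface {
	Contains(key []byte) bool
}

// maybeSkip returns true when the entry can be skipped without further work:
// if a filter exists and claims a possible hit (or there is no filter at
// all), the authoritative database is consulted; a filter miss is definitive.
func maybeSkip(b bloom, db map[string]bool, key []byte) bool {
	if b == nil || b.Contains(key) {
		// Bloom filter says this might be a duplicate (or the filter is
		// disabled): double check against the database.
		return db[string(key)]
	}
	// Definitive miss: the filter guarantees the key is absent.
	return false
}

func main() {
	db := map[string]bool{"abc": true}
	fmt.Println(maybeSkip(nil, db, []byte("abc"))) // true: no filter, db hit
	fmt.Println(maybeSkip(nil, db, []byte("xyz"))) // false: no filter, db miss
}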