diff --git a/core/blockchain.go b/core/blockchain.go
index 9aef138f3a..6e50c8da56 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -1004,14 +1004,14 @@ func (bc *BlockChain) Stop() {
recent := bc.GetBlockByNumber(number - offset)
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
- if err := triedb.Commit(recent.Root(), true, nil); err != nil {
+ if err := triedb.Commit(recent.Root(), true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
}
}
}
if snapBase != (common.Hash{}) {
log.Info("Writing snapshot state to disk", "root", snapBase)
- if err := triedb.Commit(snapBase, true, nil); err != nil {
+ if err := triedb.Commit(snapBase, true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
}
}
@@ -1568,7 +1568,7 @@ func (bc *BlockChain) writeBlockWithState(
// If we're running an archive node, always flush
if bc.cacheConfig.TrieDirtyDisabled {
- if err := bc.triedb.Commit(root, false, nil); err != nil {
+ if err := bc.triedb.Commit(root, false); err != nil {
return NonStatTy, err
}
} else {
@@ -1608,7 +1608,7 @@ func (bc *BlockChain) writeBlockWithState(
)
}
// Flush an entire trie and restart the counters
- bc.triedb.Commit(header.Root, true, nil)
+ bc.triedb.Commit(header.Root, true)
lastWrite = chosen
bc.gcproc = 0
}
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index 9e100a854a..81503ad132 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -1810,7 +1810,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if tt.commitBlock > 0 {
- chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+ chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true)
if snapshots {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
@@ -1935,7 +1935,7 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[:1], nil); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
- chain.stateCache.TrieDB().Commit(blocks[0].Root(), true, nil)
+ chain.stateCache.TrieDB().Commit(blocks[0].Root(), true)
// Insert block B2 and commit the snapshot into disk
if _, err := chain.InsertChain(blocks[1:2], nil); err != nil {
@@ -1949,7 +1949,7 @@ func TestIssue23496(t *testing.T) {
if _, err := chain.InsertChain(blocks[2:3], nil); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
- chain.stateCache.TrieDB().Commit(blocks[2].Root(), true, nil)
+ chain.stateCache.TrieDB().Commit(blocks[2].Root(), true)
// Insert the remaining blocks
if _, err := chain.InsertChain(blocks[3:], nil); err != nil {
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index 11c7693436..3a66101c1c 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -2009,7 +2009,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if tt.commitBlock > 0 {
- chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true, nil)
+ chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), true)
if snapshots {
if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go
index e5affaab9b..c4e1229648 100644
--- a/core/blockchain_snapshot_test.go
+++ b/core/blockchain_snapshot_test.go
@@ -106,7 +106,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo
startPoint = point
if basic.commitBlock > 0 && basic.commitBlock == point {
- chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
+ chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true)
}
if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
// Flushing the entire snap tree into the disk, the
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 6d786c5f0c..e52936c3fd 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -1560,7 +1560,7 @@ func TestTrieForkGC(t *testing.T) {
chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
}
- if len(chain.stateCache.TrieDB().Nodes()) > 0 {
+	if nodes, _ := chain.TrieDB().Size(); nodes > 0 { // NOTE(review): other call sites use chain.stateCache.TrieDB() — confirm a BlockChain.TrieDB() accessor exists in this branch
t.Fatalf("stale tries still alive after garbase collection")
}
}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 5cec7c0214..0554528a96 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -323,7 +323,7 @@ func generateChain(
panic(fmt.Sprintf("state write error: %v", err))
}
if flushDisk {
- if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil {
+ if err := statedb.Database().TrieDB().Commit(root, false); err != nil {
panic(fmt.Sprintf("trie write error: %v", err))
}
}
diff --git a/core/dao_test.go b/core/dao_test.go
index adf3464bd3..2fa5b4e26c 100644
--- a/core/dao_test.go
+++ b/core/dao_test.go
@@ -94,7 +94,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+ if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
@@ -119,7 +119,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+ if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
@@ -145,7 +145,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import contra-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+ if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit contra-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&proConf, conBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
@@ -165,7 +165,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
if _, err := bc.InsertChain(blocks, nil); err != nil {
t.Fatalf("failed to import pro-fork chain for expansion: %v", err)
}
- if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil {
+ if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true); err != nil {
t.Fatalf("failed to commit pro-fork head for expansion: %v", err)
}
blocks, _ = GenerateChain(&conConf, proBc.CurrentBlock(), ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {}, true)
diff --git a/core/genesis.go b/core/genesis.go
index b3f6e1b7fc..5b248c5bbf 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -123,7 +123,7 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database) error {
}
// Commit newly generated states into disk if it's not empty.
if root != types.EmptyRootHash {
- if err := triedb.Commit(root, true, nil); err != nil {
+ if err := triedb.Commit(root, true); err != nil {
return err
}
}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index ae9cbcff96..439e47df9f 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -22,6 +22,7 @@ import (
"encoding/binary"
"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto" // NOTE(review): the new Is*TrieNode helpers below use the "bytes" package — confirm it is already imported in this file
"github.com/ethereum/go-ethereum/metrics"
)
@@ -103,7 +104,7 @@ var (
internalTxsPrefix = []byte("itxs") // internalTxsPrefix + block hash -> internal transactions
dirtyAccountsKey = []byte("dacc") // dirtyAccountsPrefix + block hash -> dirty accounts
- // Path-based trie node scheme.
+	// Path-based storage scheme of Merkle Patricia trie.
trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node
trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node
@@ -250,3 +251,48 @@ func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
func snapshotConsortiumKey(hash common.Hash) []byte {
return append(snapshotConsortiumPrefix, hash.Bytes()...)
}
+
+// IsLegacyTrieNode reports whether a provided database entry is a legacy trie
+// node. The characteristics of legacy trie node are:
+// - the key length is 32 bytes
+// - the key is the hash of val
+func IsLegacyTrieNode(key []byte, val []byte) bool {
+ if len(key) != common.HashLength {
+ return false
+ }
+ return bytes.Equal(key, crypto.Keccak256(val))
+}
+
+// IsAccountTrieNode reports whether a provided database entry is an account
+// trie node in path-based state scheme.
+func IsAccountTrieNode(key []byte) (bool, []byte) {
+ if !bytes.HasPrefix(key, trieNodeAccountPrefix) {
+ return false, nil
+ }
+	// The remaining key should only consist of a hex node path
+ // whose length is in the range 0 to 64 (64 is excluded
+ // since leaves are always wrapped with shortNode).
+ if len(key) >= len(trieNodeAccountPrefix)+common.HashLength*2 {
+ return false, nil
+ }
+ return true, key[len(trieNodeAccountPrefix):]
+}
+
+// IsStorageTrieNode reports whether a provided database entry is a storage
+// trie node in path-based state scheme.
+func IsStorageTrieNode(key []byte) (bool, common.Hash, []byte) {
+ if !bytes.HasPrefix(key, trieNodeStoragePrefix) {
+ return false, common.Hash{}, nil
+ }
+ // The remaining key consists of 2 parts:
+ // - 32 bytes account hash
+ // - hex node path whose length is in the range 0 to 64
+ if len(key) < len(trieNodeStoragePrefix)+common.HashLength {
+ return false, common.Hash{}, nil
+ }
+ if len(key) >= len(trieNodeStoragePrefix)+common.HashLength+common.HashLength*2 {
+ return false, common.Hash{}, nil
+ }
+ accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength])
+ return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:]
+}
diff --git a/core/state/database.go b/core/state/database.go
index d2837c83f9..6991e85843 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
lru "github.com/hashicorp/golang-lru/v2"
)
@@ -92,7 +93,7 @@ type Trie interface {
// corresponding node hash. All collected nodes(including dirty leaves if
// collectLeaf is true) will be encapsulated into a nodeset for return.
// The returned nodeset can be nil if the trie is clean(nothing to commit).
- Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error)
+ Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error)
// NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key.
diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go
index 7669ac97a2..b093083db2 100644
--- a/core/state/iterator_test.go
+++ b/core/state/iterator_test.go
@@ -27,7 +27,7 @@ import (
func TestNodeIteratorCoverage(t *testing.T) {
// Create some arbitrary test state to iterate
db, sdb, root, _ := makeTestState()
- sdb.TrieDB().Commit(root, false, nil)
+ sdb.TrieDB().Commit(root, false)
state, err := New(root, sdb, nil)
if err != nil {
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index af4279da6d..e824a91bc5 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -29,12 +29,14 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -438,9 +440,9 @@ func (dl *diskLayer) generateRange(trieID *trie.ID, prefix []byte, kind string,
}
root, nodes, _ := snapTrie.Commit(false)
if nodes != nil {
- snapTrieDb.Update(trie.NewWithNodeSet(nodes))
+ snapTrieDb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
}
- snapTrieDb.Commit(root, false, nil)
+ snapTrieDb.Commit(root, false)
}
tr := result.tr
if tr == nil {
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index f4d2eeb15b..51138bbc19 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -25,10 +25,12 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -138,7 +140,7 @@ type testHelper struct {
diskdb ethdb.Database
triedb *trie.Database
accTrie *trie.SecureTrie
- nodes *trie.MergedNodeSet
+ nodes *trienode.MergedNodeSet
}
func newHelper() *testHelper {
@@ -149,7 +151,7 @@ func newHelper() *testHelper {
diskdb: diskdb,
triedb: triedb,
accTrie: accTrie,
- nodes: trie.NewMergedNodeSet(),
+ nodes: trienode.NewMergedNodeSet(),
}
}
@@ -196,8 +198,8 @@ func (t *testHelper) Commit() common.Hash {
if nodes != nil {
t.nodes.Merge(nodes)
}
- t.triedb.Update(t.nodes)
- t.triedb.Commit(root, false, nil)
+ t.triedb.Update(root, types.EmptyRootHash, t.nodes)
+ t.triedb.Commit(root, false)
return root
}
@@ -385,7 +387,7 @@ func TestGenerateCorruptAccountTrie(t *testing.T) {
root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978
// Delete an account trie leaf and ensure the generator chokes
- helper.triedb.Commit(root, false, nil)
+ helper.triedb.Commit(root, false)
helper.diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes())
snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root)
diff --git a/core/state/state_object.go b/core/state/state_object.go
index bb9e3d6781..3adcff01e9 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -28,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var emptyCodeHash = crypto.Keccak256(nil)
@@ -394,7 +394,7 @@ func (s *stateObject) updateRoot(db Database) {
// commitTrie submits the storage changes into the storage trie and re-computes
// the root. Besides, all trie changes will be collected in a nodeset and returned.
-func (s *stateObject) commitTrie(db Database) (*trie.NodeSet, error) {
+func (s *stateObject) commitTrie(db Database) (*trienode.NodeSet, error) {
// If nothing changed, don't bother with hashing anything
if s.updateTrie(db) == nil {
return nil, nil
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 898e65597c..c15613de40 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
type revision struct {
@@ -978,7 +979,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
accountTrieNodesDeleted int
storageTrieNodesUpdated int
storageTrieNodesDeleted int
- nodes = trie.NewMergedNodeSet()
+ nodes = trienode.NewMergedNodeSet()
)
codeWriter := s.db.TrieDB().DiskDB().NewBatch()
for addr := range s.stateObjectsDirty {
@@ -1082,7 +1083,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
if root != origin {
start := time.Now()
- if err := s.db.TrieDB().Update(nodes); err != nil {
+ if err := s.db.TrieDB().Update(root, origin, nodes); err != nil {
return common.Hash{}, err
}
s.originalRoot = root
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index be7d4e281e..f11a11731e 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -55,7 +55,7 @@ func TestUpdateLeaks(t *testing.T) {
}
root := state.IntermediateRoot(false)
- if err := state.Database().TrieDB().Commit(root, false, nil); err != nil {
+ if err := state.Database().TrieDB().Commit(root, false); err != nil {
t.Errorf("can not commit trie %v to persistent database", root.Hex())
}
@@ -106,7 +106,7 @@ func TestIntermediateLeaks(t *testing.T) {
if err != nil {
t.Fatalf("failed to commit transition state: %v", err)
}
- if err = transState.Database().TrieDB().Commit(transRoot, false, nil); err != nil {
+ if err = transState.Database().TrieDB().Commit(transRoot, false); err != nil {
t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
}
@@ -114,7 +114,7 @@ func TestIntermediateLeaks(t *testing.T) {
if err != nil {
t.Fatalf("failed to commit final state: %v", err)
}
- if err = finalState.Database().TrieDB().Commit(finalRoot, false, nil); err != nil {
+ if err = finalState.Database().TrieDB().Commit(finalRoot, false); err != nil {
t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex())
}
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index ffea17cee2..d7334f0639 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -175,7 +175,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
// Create a random state to copy
_, srcDb, srcRoot, srcAccounts := makeTestState()
if commit {
- srcDb.TrieDB().Commit(srcRoot, false, nil)
+ srcDb.TrieDB().Commit(srcRoot, false)
}
srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), srcDb.TrieDB())
@@ -329,7 +329,8 @@ func TestIterativeDelayedStateSync(t *testing.T) {
if len(nodeElements) > 0 {
nodeResults := make([]trie.NodeSyncResult, len(nodeElements)/2+1)
for i, element := range nodeElements[:len(nodeResults)] {
- data, err := srcDb.TrieDB().Node(element.hash)
+ owner, inner := trie.ResolvePath([]byte(element.path))
+ data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve contract bytecode for %x", element.code)
}
@@ -415,7 +416,8 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
if len(nodeQueue) > 0 {
results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
for path, element := range nodeQueue {
- data, err := srcDb.TrieDB().Node(element.hash)
+ owner, inner := trie.ResolvePath([]byte(element.path))
+ data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x %v %v", element.hash, []byte(element.path), element.path)
}
@@ -503,7 +505,8 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
for path, element := range nodeQueue {
delete(nodeQueue, path)
- data, err := srcDb.TrieDB().Node(element.hash)
+ owner, inner := trie.ResolvePath([]byte(element.path))
+ data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x", element.hash)
}
@@ -603,7 +606,8 @@ func TestIncompleteStateSync(t *testing.T) {
if len(nodeQueue) > 0 {
results := make([]trie.NodeSyncResult, 0, len(nodeQueue))
for path, element := range nodeQueue {
- data, err := srcDb.TrieDB().Node(element.hash)
+ owner, inner := trie.ResolvePath([]byte(element.path))
+ data, err := srcDb.TrieDB().Reader(srcRoot).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x", element.hash)
}
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index ab3f691c84..0a0f3b9d76 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -1381,7 +1382,7 @@ func makeAccountTrieNoStorage(n int) (string, *trie.Trie, entrySlice) {
// Commit the state changes into db and re-create the trie
// for accessing later.
root, nodes, _ := accTrie.Commit(false)
- db.Update(trie.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
@@ -1442,7 +1443,7 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, entrySlice) {
// Commit the state changes into db and re-create the trie
// for accessing later.
root, nodes, _ := accTrie.Commit(false)
- db.Update(trie.NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
accTrie, _ = trie.New(trie.StateTrieID(root), db)
return db.Scheme(), accTrie, entries
@@ -1458,7 +1459,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
storageRoots = make(map[common.Hash]common.Hash)
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
- nodes = trie.NewMergedNodeSet()
+ nodes = trienode.NewMergedNodeSet()
)
// Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ {
@@ -1490,7 +1491,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool)
nodes.Merge(set)
// Commit gathered dirty nodes into database
- db.Update(nodes)
+ db.Update(root, types.EmptyRootHash, nodes)
// Re-create tries with new root
accTrie, _ = trie.New(trie.StateTrieID(root), db)
@@ -1511,7 +1512,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
storageRoots = make(map[common.Hash]common.Hash)
storageTries = make(map[common.Hash]*trie.Trie)
storageEntries = make(map[common.Hash]entrySlice)
- nodes = trie.NewMergedNodeSet()
+ nodes = trienode.NewMergedNodeSet()
)
// Create n accounts in the trie
for i := uint64(1); i <= uint64(accounts); i++ {
@@ -1523,7 +1524,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
// Make a storage trie
var (
stRoot common.Hash
- stNodes *trie.NodeSet
+ stNodes *trienode.NodeSet
stEntries entrySlice
)
if boundary {
@@ -1552,7 +1553,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
nodes.Merge(set)
// Commit gathered dirty nodes into database
- db.Update(nodes)
+ db.Update(root, types.EmptyRootHash, nodes)
// Re-create tries with new root
accTrie, err := trie.New(trie.StateTrieID(root), db)
@@ -1573,7 +1574,7 @@ func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (strin
// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// not-yet-committed trie and the sorted entries. The seeds can be used to ensure
// that tries are unique.
-func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
+func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trienode.NodeSet, entrySlice) {
trie, _ := trie.New(trie.StorageTrieID(common.Hash{}, owner, common.Hash{}), db)
var entries entrySlice
for i := uint64(1); i <= n; i++ {
@@ -1596,7 +1597,7 @@ func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Databas
// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hash.
-func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trie.NodeSet, entrySlice) {
+func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trienode.NodeSet, entrySlice) {
var (
entries entrySlice
boundaries []common.Hash
diff --git a/light/postprocess.go b/light/postprocess.go
index d6ba1089a7..0f3dce0f17 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// IndexerConfig includes a set of configs for chain indexers.
@@ -138,6 +139,7 @@ type ChtIndexerBackend struct {
section, sectionSize uint64
lastHash common.Hash
trie *trie.Trie
+ originRoot common.Hash
}
// NewChtIndexer creates a Cht chain indexer
@@ -196,6 +198,7 @@ func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSecti
}
}
c.section = section
+ c.originRoot = root
return err
}
@@ -223,7 +226,7 @@ func (c *ChtIndexerBackend) Commit() error {
}
// Commite trie changes into trie database in case it's not nil.
if nodes != nil {
- if err := c.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+ if err := c.triedb.Update(root, c.originRoot, trienode.NewWithNodeSet(nodes)); err != nil {
return err
}
}
@@ -236,7 +239,7 @@ func (c *ChtIndexerBackend) Commit() error {
if !c.disablePruning {
// Flush the triedb and track the latest trie nodes.
c.trieset.Clear()
- c.triedb.Commit(root, false, func(hash common.Hash) { c.trieset.Add(hash) })
+		c.triedb.Commit(root, false) // FIXME(review): Commit no longer takes the node callback, so c.trieset is cleared above but never repopulated — the pruning loop below may treat live CHT nodes as stale; verify
it := c.trieTable.NewIterator(nil, nil)
defer it.Release()
@@ -257,7 +260,7 @@ func (c *ChtIndexerBackend) Commit() error {
}
log.Debug("Prune historical CHT trie nodes", "deleted", deleted, "remaining", remaining, "elapsed", common.PrettyDuration(time.Since(t)))
} else {
- c.triedb.Commit(root, false, nil)
+ c.triedb.Commit(root, false)
}
log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
@@ -341,6 +344,7 @@ type BloomTrieIndexerBackend struct {
bloomTrieRatio uint64
trie *trie.Trie
sectionHeads []common.Hash
+	originRoot   common.Hash // NOTE(review): unlike ChtIndexerBackend (set in Reset), nothing in this diff assigns originRoot — Update at commit time will always receive the zero hash; confirm Reset should set it
}
// NewBloomTrieIndexer creates a BloomTrie chain indexer
@@ -470,7 +474,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
}
if nodes != nil {
- if err := b.triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+ if err := b.triedb.Update(root, b.originRoot, trienode.NewWithNodeSet(nodes)); err != nil {
return err
}
}
@@ -484,7 +488,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
if !b.disablePruning {
// Flush the triedb and track the latest trie nodes.
b.trieset.Clear()
+		b.triedb.Commit(root, false) // FIXME(review): b.trieset is cleared above but never repopulated now that Commit lost its callback — verify the bloom-trie pruning loop below still prunes correctly
+ b.triedb.Commit(root, false)
it := b.trieTable.NewIterator(nil, nil)
defer it.Release()
@@ -505,7 +509,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
}
log.Debug("Prune historical bloom trie nodes", "deleted", deleted, "remaining", remaining, "elapsed", common.PrettyDuration(time.Since(t)))
} else {
- b.triedb.Commit(root, false, nil)
+ b.triedb.Commit(root, false)
}
sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
diff --git a/light/trie.go b/light/trie.go
index e60ad49c97..a09488a4ba 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -136,7 +137,7 @@ func (t *odrTrie) TryDelete(key []byte) error {
})
}
-func (t *odrTrie) Commit(collectLeaf bool) (common.Hash, *trie.NodeSet, error) {
+func (t *odrTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
if t.trie == nil {
return t.id.Root, nil, nil
}
diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go
index a0ba68e211..f1f8e94c3d 100644
--- a/tests/fuzzers/stacktrie/trie_fuzzer.go
+++ b/tests/fuzzers/stacktrie/trie_fuzzer.go
@@ -27,9 +27,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -187,10 +189,10 @@ func (f *fuzzer) fuzz() int {
panic(err)
}
if nodes != nil {
- dbA.Update(trie.NewWithNodeSet(nodes))
+ dbA.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
}
// Flush memdb -> disk (sponge)
- dbA.Commit(rootA, false, nil)
+ dbA.Commit(rootA, false)
// Stacktrie requires sorted insertion
sort.Sort(vals)
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
index 4be8ebb9e8..12165d5f54 100644
--- a/tests/fuzzers/trie/trie-fuzzer.go
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -22,7 +22,9 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// randTest performs random trie operations.
@@ -142,10 +144,12 @@ func Fuzz(input []byte) int {
}
func runRandTest(rt randTest) error {
- triedb := trie.NewDatabase(rawdb.NewMemoryDatabase())
-
- tr := trie.NewEmpty(triedb)
- values := make(map[string]string) // tracks content of the trie
+ var (
+ triedb = trie.NewDatabase(rawdb.NewMemoryDatabase())
+ tr = trie.NewEmpty(triedb)
+ origin = types.EmptyRootHash
+ values = make(map[string]string) // tracks content of the trie
+ )
for i, step := range rt {
switch step.op {
@@ -169,7 +173,7 @@ func runRandTest(rt randTest) error {
return err
}
if nodes != nil {
- if err := triedb.Update(trie.NewWithNodeSet(nodes)); err != nil {
+ if err := triedb.Update(hash, origin, trienode.NewWithNodeSet(nodes)); err != nil {
return err
}
}
@@ -178,6 +182,7 @@ func runRandTest(rt randTest) error {
return err
}
tr = newtr
+ origin = hash
case opItercheckhash:
checktr := trie.NewEmpty(triedb)
it := trie.NewIterator(tr.NodeIterator(nil))
diff --git a/trie/committer.go b/trie/committer.go
index b19316631f..83d08f4dcd 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -30,13 +31,6 @@ import (
// some parallelism but not incur too much memory overhead.
const leafChanSize = 200
-// leaf represents a trie leaf value
-type leaf struct {
- blob []byte // raw blob of leaf
- parent common.Hash // the hash of parent node
- path []byte // the path from the root node
-}
-
// committer is a type used for the trie Commit operation. The committer will
// capture all dirty nodes during the commit process and keep them cached in
// insertion order.
@@ -45,7 +39,7 @@ type committer struct {
sha crypto.KeccakState
owner common.Hash // TODO: same as nodes.owner, consider removing
- nodes *NodeSet
+ nodes *trienode.NodeSet
tracer *tracer
collectLeaf bool
}
@@ -61,7 +55,7 @@ var committerPool = sync.Pool{
}
// newCommitter creates a new committer or picks one from the pool.
-func newCommitter(nodes *NodeSet, tracer *tracer, collectLeaf bool) *committer {
+func newCommitter(nodes *trienode.NodeSet, tracer *tracer, collectLeaf bool) *committer {
return &committer{
nodes: nodes,
tracer: tracer,
@@ -70,7 +64,7 @@ func newCommitter(nodes *NodeSet, tracer *tracer, collectLeaf bool) *committer {
}
// Commit collapses a node down into a hash node and inserts it into the database
-func (c *committer) Commit(n node) (hashNode, *NodeSet, error) {
+func (c *committer) Commit(n node) (hashNode, *trienode.NodeSet, error) {
h, err := c.commit(nil, n)
if err != nil {
return nil, nil, err
@@ -176,7 +170,7 @@ func (c *committer) store(path []byte, n node) node {
// deleted only if the node was existent in database before.
prev, ok := c.tracer.accessList[string(path)]
if ok {
- c.nodes.addNode(path, &nodeWithPrev{&memoryNode{}, prev})
+ c.nodes.AddNode(path, trienode.NewWithPrev(common.Hash{}, nil, prev))
}
return n
}
@@ -185,24 +179,22 @@ func (c *committer) store(path []byte, n node) node {
var (
nhash = common.BytesToHash(hash)
blob, _ = rlp.EncodeToBytes(n)
- node = &nodeWithPrev{
- &memoryNode{
- nhash,
- blob,
- },
+ node = trienode.NewWithPrev(
+ nhash,
+ blob,
c.tracer.accessList[string(path)],
- }
+ )
)
// Collect the dirty node to nodeset for return.
- c.nodes.addNode(path, node)
+ c.nodes.AddNode(path, node)
// Collect the corresponding leaf node if it's required. We don't check
// full node since it's impossible to store value in fullNode. The key
// length of leaves should be exactly same.
if c.collectLeaf {
if sn, ok := n.(*shortNode); ok {
if val, ok := sn.Val.(valueNode); ok {
- c.nodes.addLeaf(&leaf{blob: val, parent: nhash})
+ c.nodes.AddLeaf(nhash, val)
}
}
}
@@ -214,7 +206,7 @@ type mptResolver struct{}
// ForEach implements childResolver, decodes the provided node and
// traverses the children inside.
-func (resolver mptResolver) forEach(node []byte, onChild func(common.Hash)) {
+func (resolver mptResolver) ForEach(node []byte, onChild func(common.Hash)) {
forGatherChildren(mustDecodeNode(nil, node), onChild)
}
diff --git a/trie/database_test.go b/trie/database_test.go
index 54d7529476..f81dc135ca 100644
--- a/trie/database_test.go
+++ b/trie/database_test.go
@@ -17,17 +17,20 @@
package trie
import (
- "testing"
-
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
)
-// Tests that the trie database returns a missing trie node error if attempting
-// to retrieve the meta root.
-func TestDatabaseMetarootFetch(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase())
- if _, err := db.Node(common.Hash{}); err == nil {
- t.Fatalf("metaroot retrieval succeeded")
+// newTestDatabase initializes the trie database with specified scheme.
+func newTestDatabase(diskdb ethdb.Database, scheme string) *Database {
+ db := prepare(diskdb, nil)
+ if scheme == rawdb.HashScheme {
+ db.backend = hashdb.New(diskdb, db.cleans, mptResolver{})
}
+ //} else {
+ // db.backend = snap.New(diskdb, db.cleans, nil)
+ //}
+ return db
}
diff --git a/trie/database_wrap.go b/trie/database_wrap.go
new file mode 100644
index 0000000000..78b8e39083
--- /dev/null
+++ b/trie/database_wrap.go
@@ -0,0 +1,287 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+ "errors"
+ "runtime"
+ "time"
+
+ "github.com/VictoriaMetrics/fastcache"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+)
+
+// Config defines all necessary options for database.
+type Config struct {
+ Cache int // Memory allowance (MB) to use for caching trie nodes in memory
+ Journal string // Journal of clean cache to survive node restarts
+ Preimages bool // Flag whether the preimage of trie key is recorded
+}
+
+// backend defines the methods needed to access/update trie nodes in different
+// state scheme.
+type backend interface {
+ // Scheme returns the identifier of used storage scheme.
+ Scheme() string
+
+ // Initialized returns an indicator if the state data is already initialized
+ // according to the state scheme.
+ Initialized(genesisRoot common.Hash) bool
+
+ // Size returns the current storage size of the memory cache in front of the
+ // persistent database layer.
+ Size() common.StorageSize
+
+ // Update performs a state transition by committing dirty nodes contained
+ // in the given set in order to update state from the specified parent to
+ // the specified root.
+ Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error
+
+ // Nodes retrieves the hashes of all the nodes cached within the memory database.
+ // This method is extremely expensive and should only be used to validate internal
+ // states in test code.
+ Nodes() []common.Hash
+
+ // DiskDB retrieves the persistent storage backing the trie database.
+ DiskDB() ethdb.KeyValueStore
+
+ // Commit writes all relevant trie nodes belonging to the specified state
+ // to disk. Report specifies whether logs will be displayed in info level.
+ Commit(root common.Hash, report bool, callback func(common.Hash)) error
+
+ // Close closes the trie database backend and releases all held resources.
+ Close() error
+}
+
+// Database is the wrapper of the underlying backend which is shared by different
+// types of node backend as an entrypoint. It's responsible for all interactions
+// relevant with trie nodes and node preimages.
+type Database struct {
+ config *Config // Configuration for trie database
+ diskdb ethdb.Database // Persistent database to store the snapshot
+ cleans *fastcache.Cache // Megabytes permitted using for read caches
+ preimages *preimageStore // The store for caching preimages
+ backend backend // The backend for managing trie nodes
+}
+
+// prepare initializes the database with provided configs, but the
+// database backend is still left as nil.
+func prepare(diskdb ethdb.Database, config *Config) *Database {
+ var cleans *fastcache.Cache
+ if config != nil && config.Cache > 0 {
+ if config.Journal == "" {
+ cleans = fastcache.New(config.Cache * 1024 * 1024)
+ } else {
+ cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
+ }
+ }
+ var preimages *preimageStore
+ if config != nil && config.Preimages {
+ preimages = newPreimageStore(diskdb)
+ }
+ return &Database{
+ config: config,
+ diskdb: diskdb,
+ cleans: cleans,
+ preimages: preimages,
+ }
+}
+
+// NewDatabase initializes the trie database with default settings, namely
+// the legacy hash-based scheme is used by default.
+func NewDatabase(diskdb ethdb.Database) *Database {
+ return NewDatabaseWithConfig(diskdb, nil)
+}
+
+// NewDatabaseWithConfig initializes the trie database with provided configs.
+// The path-based scheme is not activated yet, always initialized with legacy
+// hash-based scheme by default.
+func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
+ db := prepare(diskdb, config)
+ db.backend = hashdb.New(diskdb, db.cleans, mptResolver{})
+ return db
+}
+
+// Reader returns a reader for accessing all trie nodes with provided state root.
+// Nil is returned in case the state is not available.
+func (db *Database) Reader(blockRoot common.Hash) Reader {
+ return db.backend.(*hashdb.Database).Reader(blockRoot)
+}
+
+// Update performs a state transition by committing dirty nodes contained in the
+// given set in order to update state from the specified parent to the specified
+// root. The held pre-images accumulated up to this point will be flushed in case
+// the size exceeds the threshold.
+func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+ if db.preimages != nil {
+ db.preimages.commit(false)
+ }
+ return db.backend.Update(root, parent, nodes)
+}
+
+// Commit iterates over all the children of a particular node, writes them out
+// to disk. As a side effect, all pre-images accumulated up to this point are
+// also written.
+func (db *Database) Commit(root common.Hash, report bool) error {
+ if db.preimages != nil {
+ db.preimages.commit(true)
+ }
+ return db.backend.Commit(root, report, nil)
+}
+
+// Size returns the storage size of dirty trie nodes in front of the persistent
+// database and the size of cached preimages.
+func (db *Database) Size() (common.StorageSize, common.StorageSize) {
+ var (
+ storages common.StorageSize
+ preimages common.StorageSize
+ )
+ storages = db.backend.Size()
+ if db.preimages != nil {
+ preimages = db.preimages.size()
+ }
+ return storages, preimages
+}
+
+// Initialized returns an indicator if the state data is already initialized
+// according to the state scheme.
+func (db *Database) Initialized(genesisRoot common.Hash) bool {
+ return db.backend.Initialized(genesisRoot)
+}
+
+// Scheme returns the node scheme used in the database.
+func (db *Database) Scheme() string {
+ return db.backend.Scheme()
+}
+
+// DiskDB retrieves the persistent storage backing the trie database.
+func (db *Database) DiskDB() ethdb.KeyValueStore {
+ return db.backend.DiskDB()
+}
+
+// Nodes retrieves the hashes of all the nodes cached within the memory database.
+// This method is extremely expensive and should only be used to validate internal
+// states in test code.
+func (db *Database) Nodes() []common.Hash {
+ return db.backend.Nodes()
+}
+
+// Close flushes the dangling preimages to disk and closes the trie database.
+// It is meant to be called when closing the blockchain object, so that all
+// resources held can be released correctly.
+func (db *Database) Close() error {
+ if db.preimages != nil {
+ db.preimages.commit(true)
+ }
+ return db.backend.Close()
+}
+
+// saveCache saves clean state cache to given directory path
+// using specified CPU cores.
+func (db *Database) saveCache(dir string, threads int) error {
+ if db.cleans == nil {
+ return nil
+ }
+ log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads)
+
+ start := time.Now()
+ err := db.cleans.SaveToFileConcurrent(dir, threads)
+ if err != nil {
+ log.Error("Failed to persist clean trie cache", "error", err)
+ return err
+ }
+ log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start)))
+ return nil
+}
+
+// SaveCache atomically saves fast cache data to the given dir using all
+// available CPU cores.
+func (db *Database) SaveCache(dir string) error {
+ return db.saveCache(dir, runtime.GOMAXPROCS(0))
+}
+
+// SaveCachePeriodically atomically saves fast cache data to the given dir with
+// the specified interval. All dump operation will only use a single CPU core.
+func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ db.saveCache(dir, 1)
+ case <-stopCh:
+ return
+ }
+ }
+}
+
+// Cap iteratively flushes old but still referenced trie nodes until the total
+// memory usage goes below the given threshold. The held pre-images accumulated
+// up to this point will be flushed in case the size exceeds the threshold.
+//
+// It's only supported by hash-based database and will return an error for others.
+func (db *Database) Cap(limit common.StorageSize) error {
+ hdb, ok := db.backend.(*hashdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ if db.preimages != nil {
+ db.preimages.commit(false)
+ }
+ return hdb.Cap(limit)
+}
+
+// Reference adds a new reference from a parent node to a child node. This function
+// is used to add reference between internal trie node and external node(e.g. storage
+// trie root), all internal trie nodes are referenced together by database itself.
+//
+// It's only supported by hash-based database and will return an error for others.
+func (db *Database) Reference(root common.Hash, parent common.Hash) error {
+ hdb, ok := db.backend.(*hashdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ hdb.Reference(root, parent)
+ return nil
+}
+
+// Dereference removes an existing reference from a root node. It's only
+// supported by hash-based database and will return an error for others.
+func (db *Database) Dereference(root common.Hash) error {
+ hdb, ok := db.backend.(*hashdb.Database)
+ if !ok {
+ return errors.New("not supported")
+ }
+ hdb.Dereference(root)
+ return nil
+}
+
+// Node retrieves the rlp-encoded node blob with provided node hash. It's
+// only supported by hash-based database and will return an error for others.
+// Note, this function should be deprecated once ETH66 is deprecated.
+func (db *Database) Node(hash common.Hash) ([]byte, error) {
+ hdb, ok := db.backend.(*hashdb.Database)
+ if !ok {
+ return nil, errors.New("not supported")
+ }
+ return hdb.Node(hash)
+}
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 6fc6eea782..f01c154d26 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -25,9 +25,11 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
func TestEmptyIterator(t *testing.T) {
@@ -64,7 +66,7 @@ func TestIterator(t *testing.T) {
if err != nil {
t.Fatalf("Failed to commit trie %v", err)
}
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
found := make(map[string]string)
it := NewIterator(trie.NodeIterator(nil))
@@ -117,39 +119,61 @@ func TestIteratorLargeData(t *testing.T) {
}
}
-// Tests that the node iterator indeed walks over the entire database contents.
+type iterationElement struct {
+ hash common.Hash
+ path []byte
+ blob []byte
+}
+
 func TestNodeIteratorCoverage(t *testing.T) {
+	testNodeIteratorCoverage(t, rawdb.HashScheme)
+ //testNodeIteratorCoverage(t, rawdb.PathScheme)
+}
+
+func testNodeIteratorCoverage(t *testing.T, scheme string) {
// Create some arbitrary test trie to iterate
- db, trie, _ := makeTestTrie()
+ db, nodeDb, trie, _ := makeTestTrie(scheme)
// Gather all the node hashes found by the iterator
- hashes := make(map[common.Hash]struct{})
+ var elements = make(map[common.Hash]iterationElement)
for it := trie.NodeIterator(nil); it.Next(true); {
if it.Hash() != (common.Hash{}) {
- hashes[it.Hash()] = struct{}{}
+ elements[it.Hash()] = iterationElement{
+ hash: it.Hash(),
+ path: common.CopyBytes(it.Path()),
+ blob: common.CopyBytes(it.NodeBlob()),
+ }
}
}
// Cross check the hashes and the database itself
- for hash := range hashes {
- if _, err := db.Node(hash); err != nil {
- t.Errorf("failed to retrieve reported node %x: %v", hash, err)
+ for _, element := range elements {
+ if blob, err := nodeDb.Reader(trie.Hash()).Node(common.Hash{}, element.path, element.hash); err != nil {
+ t.Errorf("failed to retrieve reported node %x: %v", element.hash, err)
+ } else if !bytes.Equal(blob, element.blob) {
+ t.Errorf("node blob is different, want %v got %v", element.blob, blob)
}
}
- for hash, obj := range db.dirties {
- if obj != nil && hash != (common.Hash{}) {
- if _, ok := hashes[hash]; !ok {
- t.Errorf("state entry not reported %x", hash)
- }
- }
- }
- it := db.diskdb.NewIterator(nil, nil)
+ var (
+ count int
+ it = db.NewIterator(nil, nil)
+ )
for it.Next() {
- key := it.Key()
- if _, ok := hashes[common.BytesToHash(key)]; !ok {
- t.Errorf("state entry not reported %x", key)
+ res, _, _ := isTrieNode(nodeDb.Scheme(), it.Key(), it.Value())
+ if !res {
+ continue
+ }
+ count += 1
+ if elem, ok := elements[crypto.Keccak256Hash(it.Value())]; !ok {
+ t.Error("state entry not reported")
+ } else if !bytes.Equal(it.Value(), elem.blob) {
+ t.Errorf("node blob is different, want %v got %v", elem.blob, it.Value())
}
}
it.Release()
+ if count != len(elements) {
+ t.Errorf("state entry is mismatched %d %d", count, len(elements))
+ }
}
type kvs struct{ k, v string }
@@ -225,7 +249,7 @@ func TestDifferenceIterator(t *testing.T) {
triea.Update([]byte(val.k), []byte(val.v))
}
rootA, nodesA, _ := triea.Commit(false)
- dba.Update(NewWithNodeSet(nodesA))
+ dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -234,7 +258,7 @@ func TestDifferenceIterator(t *testing.T) {
trieb.Update([]byte(val.k), []byte(val.v))
}
rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(NewWithNodeSet(nodesB))
+ dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string)
@@ -267,7 +291,7 @@ func TestUnionIterator(t *testing.T) {
triea.Update([]byte(val.k), []byte(val.v))
}
rootA, nodesA, _ := triea.Commit(false)
- dba.Update(NewWithNodeSet(nodesA))
+ dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
triea, _ = New(TrieID(rootA), dba)
dbb := NewDatabase(rawdb.NewMemoryDatabase())
@@ -276,7 +300,7 @@ func TestUnionIterator(t *testing.T) {
trieb.Update([]byte(val.k), []byte(val.v))
}
rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(NewWithNodeSet(nodesB))
+ dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
trieb, _ = New(TrieID(rootB), dbb)
di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)})
@@ -322,79 +346,98 @@ func TestIteratorNoDups(t *testing.T) {
}
// This test checks that nodeIterator.Next can be retried after inserting missing trie nodes.
-func TestIteratorContinueAfterErrorDisk(t *testing.T) { testIteratorContinueAfterError(t, false) }
-func TestIteratorContinueAfterErrorMemonly(t *testing.T) { testIteratorContinueAfterError(t, true) }
+func TestIteratorContinueAfterError(t *testing.T) {
+ testIteratorContinueAfterError(t, false, rawdb.HashScheme)
+ testIteratorContinueAfterError(t, true, rawdb.HashScheme)
+ // testIteratorContinueAfterError(t, false, rawdb.PathScheme)
+ // testIteratorContinueAfterError(t, true, rawdb.PathScheme)
+}
-func testIteratorContinueAfterError(t *testing.T, memonly bool) {
+func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
+ tdb := newTestDatabase(diskdb, scheme)
- tr := NewEmpty(triedb)
+ tr := NewEmpty(tdb)
for _, val := range testdata1 {
tr.Update([]byte(val.k), []byte(val.v))
}
- _, nodes, _ := tr.Commit(false)
- triedb.Update(NewWithNodeSet(nodes))
+ root, nodes, _ := tr.Commit(false)
+ tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly {
- triedb.Commit(tr.Hash(), true, nil)
+ tdb.Commit(root, false)
}
+ tr, _ = New(TrieID(root), tdb)
wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil)
var (
- diskKeys [][]byte
- memKeys []common.Hash
+ paths [][]byte
+ hashes []common.Hash
)
if memonly {
- memKeys = triedb.Nodes()
+ for path, n := range nodes.Nodes {
+ paths = append(paths, []byte(path))
+ hashes = append(hashes, n.Hash)
+ }
} else {
it := diskdb.NewIterator(nil, nil)
for it.Next() {
- diskKeys = append(diskKeys, it.Key())
+ ok, path, hash := isTrieNode(tdb.Scheme(), it.Key(), it.Value())
+ if !ok {
+ continue
+ }
+ paths = append(paths, path)
+ hashes = append(hashes, hash)
}
it.Release()
}
for i := 0; i < 20; i++ {
// Create trie that will load all nodes from DB.
- tr, _ := New(TrieID(tr.Hash()), triedb)
+ tr, _ := New(TrieID(tr.Hash()), tdb)
// Remove a random node from the database. It can't be the root node
// because that one is already loaded.
var (
- rkey common.Hash
- rval []byte
- robj *cachedNode
+ rval []byte
+ rpath []byte
+ rhash common.Hash
)
for {
if memonly {
- rkey = memKeys[rand.Intn(len(memKeys))]
+ rpath = paths[rand.Intn(len(paths))]
+ n := nodes.Nodes[string(rpath)]
+ if n == nil {
+ continue
+ }
+ rhash = n.Hash
} else {
- copy(rkey[:], diskKeys[rand.Intn(len(diskKeys))])
+ index := rand.Intn(len(paths))
+ rpath = paths[index]
+ rhash = hashes[index]
}
- if rkey != tr.Hash() {
+ if rhash != tr.Hash() {
break
}
}
if memonly {
- robj = triedb.dirties[rkey]
- delete(triedb.dirties, rkey)
+ tr.reader.banned = map[string]struct{}{string(rpath): {}}
} else {
- rval, _ = diskdb.Get(rkey[:])
- diskdb.Delete(rkey[:])
+ rval = rawdb.ReadTrieNode(diskdb, common.Hash{}, rpath, rhash, tdb.Scheme())
+ rawdb.DeleteTrieNode(diskdb, common.Hash{}, rpath, rhash, tdb.Scheme())
}
// Iterate until the error is hit.
seen := make(map[string]bool)
it := tr.NodeIterator(nil)
checkIteratorNoDups(t, it, seen)
missing, ok := it.Error().(*MissingNodeError)
- if !ok || missing.NodeHash != rkey {
+ if !ok || missing.NodeHash != rhash {
t.Fatal("didn't hit missing node, got", it.Error())
}
// Add the node back and continue iteration.
if memonly {
- triedb.dirties[rkey] = robj
+ delete(tr.reader.banned, string(rpath))
} else {
- diskdb.Put(rkey[:], rval)
+ rawdb.WriteTrieNode(diskdb, common.Hash{}, rpath, rhash, rval, tdb.Scheme())
}
checkIteratorNoDups(t, it, seen)
if it.Error() != nil {
@@ -409,42 +452,48 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool) {
// Similar to the test above, this one checks that failure to create nodeIterator at a
// certain key prefix behaves correctly when Next is called. The expectation is that Next
// should retry seeking before returning true for the first time.
-func TestIteratorContinueAfterSeekErrorDisk(t *testing.T) {
- testIteratorContinueAfterSeekError(t, false)
-}
-func TestIteratorContinueAfterSeekErrorMemonly(t *testing.T) {
- testIteratorContinueAfterSeekError(t, true)
+func TestIteratorContinueAfterSeekError(t *testing.T) {
+ testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme)
+ testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme)
+ // testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme)
+ // testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme)
}
-func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
+func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) {
// Commit test trie to db, then remove the node containing "bars".
+ var (
+ barNodePath []byte
+ barNodeHash = common.HexToHash("05041990364eb72fcb1127652ce40d8bab765f2bfe53225b1170d276cc101c2e")
+ )
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
-
+ triedb := newTestDatabase(diskdb, scheme)
ctr := NewEmpty(triedb)
for _, val := range testdata1 {
ctr.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := ctr.Commit(false)
- triedb.Update(NewWithNodeSet(nodes))
+ for path, n := range nodes.Nodes {
+ if n.Hash == barNodeHash {
+ barNodePath = []byte(path)
+ break
+ }
+ }
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly {
- triedb.Commit(root, true, nil)
+ triedb.Commit(root, false)
}
- barNodeHash := common.HexToHash("05041990364eb72fcb1127652ce40d8bab765f2bfe53225b1170d276cc101c2e")
var (
barNodeBlob []byte
- barNodeObj *cachedNode
)
+ tr, _ := New(TrieID(root), triedb)
if memonly {
- barNodeObj = triedb.dirties[barNodeHash]
- delete(triedb.dirties, barNodeHash)
+ tr.reader.banned = map[string]struct{}{string(barNodePath): {}}
} else {
- barNodeBlob, _ = diskdb.Get(barNodeHash[:])
- diskdb.Delete(barNodeHash[:])
+ barNodeBlob = rawdb.ReadTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, triedb.Scheme())
+ rawdb.DeleteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, triedb.Scheme())
}
// Create a new iterator that seeks to "bars". Seeking can't proceed because
// the node is missing.
- tr, _ := New(TrieID(root), triedb)
it := tr.NodeIterator([]byte("bars"))
missing, ok := it.Error().(*MissingNodeError)
if !ok {
@@ -454,9 +503,9 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
}
// Reinsert the missing node.
if memonly {
- triedb.dirties[barNodeHash] = barNodeObj
+ delete(tr.reader.banned, string(barNodePath))
} else {
- diskdb.Put(barNodeHash[:], barNodeBlob)
+ rawdb.WriteTrieNode(diskdb, common.Hash{}, barNodePath, barNodeHash, barNodeBlob, triedb.Scheme())
}
// Check that iteration produces the right set of values.
if err := checkIteratorOrder(testdata1[2:], NewIterator(it)); err != nil {
@@ -477,6 +526,11 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in
return len(seen)
}
+func TestIteratorNodeBlob(t *testing.T) {
+ testIteratorNodeBlob(t, rawdb.HashScheme)
+ //testIteratorNodeBlob(t, rawdb.PathScheme)
+}
+
type loggingDb struct {
getCount uint64
backend ethdb.KeyValueStore
@@ -544,8 +598,8 @@ func makeLargeTestTrie() (*Database, *SecureTrie, *loggingDb) {
val = crypto.Keccak256(val)
trie.Update(key, val)
}
- _, nodes, _ := trie.Commit(false)
- triedb.Update(NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Return the generated trie
return triedb, trie, logDb
}
@@ -564,10 +618,10 @@ func TestNodeIteratorLargeTrie(t *testing.T) {
}
}
-func TestIteratorNodeBlob(t *testing.T) {
+func testIteratorNodeBlob(t *testing.T, scheme string) {
var (
db = rawdb.NewMemoryDatabase()
- triedb = NewDatabase(db)
+ triedb = newTestDatabase(db, scheme)
trie = NewEmpty(triedb)
)
vals := []struct{ k, v string }{
@@ -584,10 +638,12 @@ func TestIteratorNodeBlob(t *testing.T) {
all[val.k] = val.v
trie.Update([]byte(val.k), []byte(val.v))
}
- trie.Commit(false)
- triedb.Cap(0)
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ triedb.Commit(root, false)
- found := make(map[common.Hash][]byte)
+ var found = make(map[common.Hash][]byte)
+ trie, _ = New(TrieID(root), triedb)
it := trie.NodeIterator(nil)
for it.Next(true) {
if it.Hash() == (common.Hash{}) {
@@ -595,15 +651,18 @@ func TestIteratorNodeBlob(t *testing.T) {
}
found[it.Hash()] = it.NodeBlob()
}
-
dbIter := db.NewIterator(nil, nil)
defer dbIter.Release()
var count int
for dbIter.Next() {
- got, present := found[common.BytesToHash(dbIter.Key())]
+ ok, _, _ := isTrieNode(triedb.Scheme(), dbIter.Key(), dbIter.Value())
+ if !ok {
+ continue
+ }
+ got, present := found[crypto.Keccak256Hash(dbIter.Value())]
if !present {
- t.Fatalf("Miss trie node %v", dbIter.Key())
+ t.Fatal("Miss trie node")
}
if !bytes.Equal(got, dbIter.Value()) {
t.Fatalf("Unexpected trie node want %v got %v", dbIter.Value(), got)
@@ -612,5 +671,33 @@ func TestIteratorNodeBlob(t *testing.T) {
}
if count != len(found) {
t.Fatal("Find extra trie node via iterator")
+
+ }
+}
+
+// isTrieNode is a helper function which reports if the provided
+// database entry belongs to a trie node or not. Note in tests
+// only single layer trie is used, namely storage trie is not
+// considered at all.
+func isTrieNode(scheme string, key, val []byte) (bool, []byte, common.Hash) {
+ var (
+ path []byte
+ hash common.Hash
+ )
+ if scheme == rawdb.HashScheme {
+ ok := rawdb.IsLegacyTrieNode(key, val)
+ if !ok {
+ return false, nil, common.Hash{}
+ }
+ hash = common.BytesToHash(key)
+ } else {
+ ok, remain := rawdb.IsAccountTrieNode(key)
+ if !ok {
+ return false, nil, common.Hash{}
+ }
+ path = common.CopyBytes(remain)
+ hash = crypto.Keccak256Hash(val)
}
+ return true, path, hash
}
diff --git a/trie/nodeset.go b/trie/nodeset.go
deleted file mode 100644
index 9288033548..0000000000
--- a/trie/nodeset.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/ethereum/go-ethereum/common"
-)
-
-// memoryNode is all the information we know about a single cached trie node
-// in the memory.
-type memoryNode struct {
- hash common.Hash // Node hash, computed by hashing rlp value, empty for deleted nodes
- node []byte // Encoded node blob, nil for deleted nodes
-}
-
-// memorySize returns the total memory size used by this node.
-// nolint:unused
-func (n *memoryNode) memorySize(pathlen int) int {
- return len(n.node) + common.HashLength + pathlen
-}
-
-// isDeleted returns the indicator if the node is marked as deleted.
-func (n *memoryNode) isDeleted() bool {
- return n.hash == (common.Hash{})
-}
-
-// rlp returns the raw rlp encoded blob of the cached trie node, either directly
-// from the cache, or by regenerating it from the collapsed node.
-// nolint:unused
-func (n *memoryNode) rlp() []byte {
- return n.node
-}
-
-// obj returns the decoded and expanded trie node, either directly from the cache,
-// or by regenerating it from the rlp encoded blob.
-// nolint:unused
-func (n *memoryNode) obj() node {
- return mustDecodeNode(n.hash[:], n.node)
-}
-
-// nodeWithPrev wraps the memoryNode with the previous node value.
-type nodeWithPrev struct {
- *memoryNode
- prev []byte // RLP-encoded previous value, nil means it's non-existent
-}
-
-// unwrap returns the internal memoryNode object.
-// nolint:unused
-func (n *nodeWithPrev) unwrap() *memoryNode {
- return n.memoryNode
-}
-
-// memorySize returns the total memory size used by this node. It overloads
-// the function in memoryNode by counting the size of previous value as well.
-// nolint: unused
-func (n *nodeWithPrev) memorySize(key int) int {
- return n.memoryNode.memorySize(key) + len(n.prev)
-}
-
-// NodeSet contains all dirty nodes collected during the commit operation
-// Each node is keyed by path. It's not the thread-safe to use.
-type NodeSet struct {
- owner common.Hash // the identifier of the trie
- leaves []*leaf // the list of dirty leaves
- updates int // the count of updated and inserted nodes
- deletes int // the count of deleted nodes
-
- // The set of all dirty nodes. Dirty nodes include newly inserted nodes,
- // deleted nodes and updated nodes. The original value of the newly
- // inserted node must be nil, and the original value of the other two
- // types must be non-nil.
- nodes map[string]*nodeWithPrev
-}
-
-// NewNodeSet initializes an empty node set to be used for tracking dirty nodes
-// from a specific account or storage trie. The owner is zero for the account
-// trie and the owning account address hash for storage tries.
-func NewNodeSet(owner common.Hash) *NodeSet {
- return &NodeSet{
- owner: owner,
- nodes: make(map[string]*nodeWithPrev),
- }
-}
-
-// forEachWithOrder iterates the dirty nodes with the order from bottom to top,
-// right to left, nodes with the longest path will be iterated first.
-func (set *NodeSet) forEachWithOrder(callback func(path string, n *memoryNode)) {
- var paths sort.StringSlice
- for path := range set.nodes {
- paths = append(paths, path)
- }
- // Bottom-up, longest path first
- sort.Sort(sort.Reverse(paths))
- for _, path := range paths {
- callback(path, set.nodes[path].unwrap())
- }
-}
-
-// addNode adds the provided dirty node into set.
-func (set *NodeSet) addNode(path []byte, n *nodeWithPrev) {
- if n.isDeleted() {
- set.deletes += 1
- } else {
- set.updates += 1
- }
- set.nodes[string(path)] = n
-}
-
-// addLeaf collects the provided leaf node into set.
-func (set *NodeSet) addLeaf(leaf *leaf) {
- set.leaves = append(set.leaves, leaf)
-}
-
-// Size returns the number of updated and deleted nodes contained in the set.
-func (set *NodeSet) Size() (int, int) {
- return set.updates, set.deletes
-}
-
-// Hashes returns the hashes of all updated nodes.
-func (set *NodeSet) Hashes() []common.Hash {
- var ret []common.Hash
- for _, node := range set.nodes {
- ret = append(ret, node.hash)
- }
- return ret
-}
-
-// Summary returns a string-representation of the NodeSet.
-func (set *NodeSet) Summary() string {
- var out = new(strings.Builder)
- fmt.Fprintf(out, "nodeset owner: %v\n", set.owner)
- if set.nodes != nil {
- for path, n := range set.nodes {
- // Deletion
- if n.isDeleted() {
- fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.prev)
- continue
- }
- // Insertion
- if len(n.prev) == 0 {
- fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.hash)
- continue
- }
- // Update
- fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.hash, n.prev)
- }
- }
- for _, n := range set.leaves {
- fmt.Fprintf(out, "[leaf]: %v\n", n)
- }
- return out.String()
-}
-
-// MergedNodeSet represents a merged dirty node set for a group of tries.
-type MergedNodeSet struct {
- sets map[common.Hash]*NodeSet
-}
-
-// NewMergedNodeSet initializes an empty merged set.
-func NewMergedNodeSet() *MergedNodeSet {
- return &MergedNodeSet{sets: make(map[common.Hash]*NodeSet)}
-}
-
-// NewWithNodeSet constructs a merged nodeset with the provided single set.
-func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
- merged := NewMergedNodeSet()
- merged.Merge(set)
- return merged
-}
-
-// Merge merges the provided dirty nodes of a trie into the set. The assumption
-// is held that no duplicated set belonging to the same trie will be merged twice.
-func (set *MergedNodeSet) Merge(other *NodeSet) error {
- _, present := set.sets[other.owner]
- if present {
- return fmt.Errorf("duplicate trie for owner %#x", other.owner)
- }
- set.sets[other.owner] = other
- return nil
-}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index ce69b839bb..973596a58f 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// SecureTrie wraps a trie with key hashing. In a secure trie, all
@@ -166,7 +167,7 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte {
// collectLeaf is true) will be encapsulated into a nodeset for return.
// The returned nodeset can be nil if the trie is clean(nothing to commit).
// All cached preimages will be also flushed if preimages recording is enabled.
-func (t *SecureTrie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
+func (t *SecureTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
// Write all the pre-images to the actual disk database
if len(t.getSecKeyCache()) > 0 {
if t.preimages != nil { // Ugly direct check but avoids the below write lock
diff --git a/trie/sync_test.go b/trie/sync_test.go
index c964608aa1..d1d26de5eb 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -23,14 +23,19 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
// makeTestTrie create a sample test trie to test node-wise reconstruction.
-func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
+func makeTestTrie(scheme string) (ethdb.Database, *Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
- triedb := NewDatabase(rawdb.NewMemoryDatabase())
+ db := rawdb.NewMemoryDatabase()
+
+ triedb := newTestDatabase(db, scheme)
trie, _ := NewSecure(TrieID(common.Hash{}), triedb)
// Fill it with some arbitrary data
@@ -52,27 +57,31 @@ func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
trie.Update(key, val)
}
}
- _, nodes, err := trie.Commit(false)
+ root, nodes, err := trie.Commit(false)
if err != nil {
panic(fmt.Errorf("failed to commit trie: %v", err))
}
- if err := triedb.Update(NewWithNodeSet(nodes)); err != nil {
+ if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
+ if err := triedb.Commit(root, false); err != nil {
+ panic(err)
+ }
// Return the generated trie
- return triedb, trie, content
+ return db, triedb, trie, content
}
// checkTrieContents cross references a reconstructed trie with an expected data
// content map.
-func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
+func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte) {
// Check root availability and trie contents
- trie, err := NewSecure(TrieID(common.BytesToHash(root)), db)
+ ndb := newTestDatabase(db, scheme)
+ trie, err := NewSecure(TrieID(common.BytesToHash(root)), ndb)
if err != nil {
t.Fatalf("failed to create trie at %x: %v", root, err)
}
- if err := checkTrieConsistency(db, common.BytesToHash(root)); err != nil {
+ if err := checkTrieConsistency(db, scheme, common.BytesToHash(root)); err != nil {
t.Fatalf("inconsistent trie at %x: %v", root, err)
}
for key, val := range content {
@@ -83,9 +92,9 @@ func checkTrieContents(t *testing.T, db *Database, root []byte, content map[stri
}
// checkTrieConsistency checks that all nodes in a trie are indeed present.
-func checkTrieConsistency(db *Database, root common.Hash) error {
- // Create and iterate a trie rooted in a subnode
- trie, err := NewSecure(TrieID(root), db)
+func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) error {
+ ndb := newTestDatabase(db, scheme)
+ trie, err := NewSecure(TrieID(root), ndb)
if err != nil {
return nil // Consider a non existent state consistent
}
@@ -106,11 +115,16 @@ type trieElement struct {
func TestEmptySync(t *testing.T) {
dbA := NewDatabase(rawdb.NewMemoryDatabase())
dbB := NewDatabase(rawdb.NewMemoryDatabase())
+ //dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
+ //dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
+
emptyA := NewEmpty(dbA)
emptyB, _ := New(TrieID(emptyRoot), dbB)
+ //emptyC := NewEmpty(dbC)
+ //emptyD, _ := New(TrieID(types.EmptyRootHash), dbD)
- for i, trie := range []*Trie{emptyA, emptyB} {
- sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()), []*Database{dbA, dbB}[i].Scheme())
+ for i, trie := range []*Trie{emptyA, emptyB /*emptyC, emptyD*/} {
+ sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()), []*Database{dbA, dbB /*dbC, dbD*/}[i].Scheme())
if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, nodes, paths, codes)
}
@@ -119,18 +133,23 @@ func TestEmptySync(t *testing.T) {
// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go.
-func TestIterativeSyncIndividual(t *testing.T) { testIterativeSync(t, 1, false) }
-func TestIterativeSyncBatched(t *testing.T) { testIterativeSync(t, 100, false) }
-func TestIterativeSyncIndividualByPath(t *testing.T) { testIterativeSync(t, 1, true) }
-func TestIterativeSyncBatchedByPath(t *testing.T) { testIterativeSync(t, 100, true) }
+func TestIterativeSync(t *testing.T) {
+ testIterativeSync(t, 1, false, rawdb.HashScheme)
+ testIterativeSync(t, 100, false, rawdb.HashScheme)
+ testIterativeSync(t, 1, true, rawdb.HashScheme)
+ testIterativeSync(t, 100, true, rawdb.HashScheme)
+ // testIterativeSync(t, 1, false, rawdb.PathScheme)
+ // testIterativeSync(t, 100, false, rawdb.PathScheme)
+ // testIterativeSync(t, 1, true, rawdb.PathScheme)
+ // testIterativeSync(t, 100, true, rawdb.PathScheme)
+}
-func testIterativeSync(t *testing.T, count int, bypath bool) {
+func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) {
// Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -148,7 +167,8 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
results := make([]NodeSyncResult, len(elements))
if !bypath {
for i, element := range elements {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
}
@@ -185,18 +205,21 @@ func testIterativeSync(t *testing.T, count int, bypath bool) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned, and the others sent only later.
func TestIterativeDelayedSync(t *testing.T) {
- // Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ testIterativeDelayedSync(t, rawdb.HashScheme)
+ //testIterativeDelayedSync(t, rawdb.PathScheme)
+}
+func testIterativeDelayedSync(t *testing.T, scheme string) {
+ // Create a random trie to copy
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -214,7 +237,8 @@ func TestIterativeDelayedSync(t *testing.T) {
// Sync only half of the scheduled nodes
results := make([]NodeSyncResult, len(elements)/2+1)
for i, element := range elements[:len(results)] {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -242,22 +266,25 @@ func TestIterativeDelayedSync(t *testing.T) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
// Tests that given a root hash, a trie can sync iteratively on a single thread,
// requesting retrieval tasks and returning all of them in one go, however in a
// random order.
-func TestIterativeRandomSyncIndividual(t *testing.T) { testIterativeRandomSync(t, 1) }
-func TestIterativeRandomSyncBatched(t *testing.T) { testIterativeRandomSync(t, 100) }
+func TestIterativeRandomSyncIndividual(t *testing.T) {
+ testIterativeRandomSync(t, 1, rawdb.HashScheme)
+ testIterativeRandomSync(t, 100, rawdb.HashScheme)
+ // testIterativeRandomSync(t, 1, rawdb.PathScheme)
+ // testIterativeRandomSync(t, 100, rawdb.PathScheme)
+}
-func testIterativeRandomSync(t *testing.T, count int) {
+func testIterativeRandomSync(t *testing.T, count int, scheme string) {
// Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -275,7 +302,8 @@ func testIterativeRandomSync(t *testing.T, count int) {
// Fetch all the queued nodes in a random order
results := make([]NodeSyncResult, 0, len(queue))
for path, element := range queue {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -304,18 +332,22 @@ func testIterativeRandomSync(t *testing.T, count int) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
// Tests that the trie scheduler can correctly reconstruct the state even if only
// partial results are returned (Even those randomly), others sent only later.
func TestIterativeRandomDelayedSync(t *testing.T) {
+ testIterativeRandomDelayedSync(t, rawdb.HashScheme)
+ // testIterativeRandomDelayedSync(t, rawdb.PathScheme)
+}
+
+func testIterativeRandomDelayedSync(t *testing.T, scheme string) {
// Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -333,7 +365,8 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
// Sync only half of the scheduled nodes, even those in random order
results := make([]NodeSyncResult, 0, len(queue)/2+1)
for path, element := range queue {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -367,18 +400,22 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
// Tests that a trie sync will not request nodes multiple times, even if they
// have such references.
func TestDuplicateAvoidanceSync(t *testing.T) {
+ testDuplicateAvoidanceSync(t, rawdb.HashScheme)
+ // testDuplicateAvoidanceSync(t, rawdb.PathScheme)
+}
+
+func testDuplicateAvoidanceSync(t *testing.T, scheme string) {
// Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -397,7 +434,8 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
for len(elements) > 0 {
results := make([]NodeSyncResult, len(elements))
for i, element := range elements {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -430,26 +468,33 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
}
// Tests that at any point in time during a sync, only complete sub-tries are in
// the database.
-func TestIncompleteSync(t *testing.T) {
+func TestIncompleteSyncHash(t *testing.T) {
+ testIncompleteSync(t, rawdb.HashScheme)
+ // testIncompleteSync(t, rawdb.PathScheme)
+}
+
+func testIncompleteSync(t *testing.T, scheme string) {
+ t.Parallel()
+
// Create a random trie to copy
- srcDb, srcTrie, _ := makeTestTrie()
+ _, srcDb, srcTrie, _ := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
// at the testing trie.
var (
- added []common.Hash
- elements []trieElement
- root = srcTrie.Hash()
+ addedKeys []string
+ addedHashes []common.Hash
+ elements []trieElement
+ root = srcTrie.Hash()
)
paths, nodes, _ := sched.Missing(1)
for i := 0; i < len(paths); i++ {
@@ -463,7 +508,8 @@ func TestIncompleteSync(t *testing.T) {
// Fetch a batch of trie nodes
results := make([]NodeSyncResult, len(elements))
for i, element := range elements {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -484,11 +530,8 @@ func TestIncompleteSync(t *testing.T) {
for _, result := range results {
hash := crypto.Keccak256Hash(result.Data)
if hash != root {
- added = append(added, hash)
- }
- // Check that all known sub-tries in the synced trie are complete
- if err := checkTrieConsistency(triedb, hash); err != nil {
- t.Fatalf("trie inconsistent: %v", err)
+ addedKeys = append(addedKeys, result.Path)
+ addedHashes = append(addedHashes, crypto.Keccak256Hash(result.Data))
}
}
// Fetch the next batch to retrieve
@@ -503,25 +546,31 @@ func TestIncompleteSync(t *testing.T) {
}
}
// Sanity check that removing any node from the database is detected
- for _, hash := range added {
- value, _ := diskdb.Get(hash.Bytes())
- diskdb.Delete(hash.Bytes())
- if err := checkTrieConsistency(triedb, root); err == nil {
- t.Fatalf("trie inconsistency not caught, missing: %x", hash)
- }
- diskdb.Put(hash.Bytes(), value)
+ for i, path := range addedKeys {
+ owner, inner := ResolvePath([]byte(path))
+ nodeHash := addedHashes[i]
+ value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme)
+ rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme)
+ if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root); err == nil {
+ t.Fatalf("trie inconsistency not caught, missing: %x", path)
+ }
+ rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme)
}
}
// Tests that trie nodes get scheduled lexicographically when having the same
// depth.
func TestSyncOrdering(t *testing.T) {
+ testSyncOrdering(t, rawdb.HashScheme)
+ // testSyncOrdering(t, rawdb.PathScheme)
+}
+
+func testSyncOrdering(t *testing.T, scheme string) {
// Create a random trie to copy
- srcDb, srcTrie, srcData := makeTestTrie()
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
// Create a destination trie and sync with the scheduler, tracking the requests
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb), srcDb.Scheme())
// The code requests are ignored here since there is no code
@@ -543,7 +592,8 @@ func TestSyncOrdering(t *testing.T) {
for len(elements) > 0 {
results := make([]NodeSyncResult, len(elements))
for i, element := range elements {
- data, err := srcDb.Node(element.hash)
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(srcTrie.Hash()).Node(owner, inner, element.hash)
if err != nil {
t.Fatalf("failed to retrieve node data for %x: %v", element.hash, err)
}
@@ -572,7 +622,7 @@ func TestSyncOrdering(t *testing.T) {
}
}
// Cross check that the two tries are in sync
- checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
// Check that the trie nodes have been requested path-ordered
for i := 0; i < len(reqs)-1; i++ {
@@ -586,3 +636,116 @@ func TestSyncOrdering(t *testing.T) {
}
}
}
+
+func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database) {
+ // Create a destination trie and sync with the scheduler
+ sched := NewSync(root, db, nil, NewSyncBloom(1, db), srcDb.Scheme())
+
+ // The code requests are ignored here since there is no code
+ // at the testing trie.
+ paths, nodes, _ := sched.Missing(1)
+ var elements []trieElement
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ }
+ for len(elements) > 0 {
+ results := make([]NodeSyncResult, len(elements))
+ for i, element := range elements {
+ owner, inner := ResolvePath([]byte(element.path))
+ data, err := srcDb.Reader(root).Node(owner, inner, element.hash)
+ if err != nil {
+ t.Fatalf("failed to retrieve node data for hash %x: %v", element.hash, err)
+ }
+ results[i] = NodeSyncResult{element.path, data}
+ }
+ for index, result := range results {
+ if err := sched.ProcessNode(result); err != nil {
+ t.Fatalf("failed to process result[%d][%v] data %v %v", index, []byte(result.Path), result.Data, err)
+ }
+ }
+ batch := db.NewBatch()
+ if err := sched.Commit(batch); err != nil {
+ t.Fatalf("failed to commit data: %v", err)
+ }
+ batch.Write()
+
+ paths, nodes, _ = sched.Missing(1)
+ elements = elements[:0]
+ for i := 0; i < len(paths); i++ {
+ elements = append(elements, trieElement{
+ path: paths[i],
+ hash: nodes[i],
+ syncPath: NewSyncPath([]byte(paths[i])),
+ })
+ }
+ }
+}
+
+// Tests that the syncing target keeps moving, which may overwrite the stale
+// states synced in the last cycle.
+func TestSyncMovingTarget(t *testing.T) {
+ testSyncMovingTarget(t, rawdb.HashScheme)
+ // testSyncMovingTarget(t, rawdb.PathScheme)
+}
+
+func testSyncMovingTarget(t *testing.T, scheme string) {
+ // Create a random trie to copy
+ _, srcDb, srcTrie, srcData := makeTestTrie(scheme)
+
+ // Create a destination trie and sync with the scheduler
+ diskdb := rawdb.NewMemoryDatabase()
+ syncWith(t, srcTrie.Hash(), diskdb, srcDb)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData)
+
+ // Push more modifications into the src trie, to see if dest trie can still
+ // sync with it (overwrite stale states)
+ var (
+ preRoot = srcTrie.Hash()
+ diff = make(map[string][]byte)
+ )
+ for i := byte(0); i < 10; i++ {
+ key, val := randBytes(32), randBytes(32)
+ srcTrie.Update(key, val)
+ diff[string(key)] = val
+ }
+ root, nodes, _ := srcTrie.Commit(false)
+ if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
+ panic(err)
+ }
+ if err := srcDb.Commit(root, false); err != nil {
+ panic(err)
+ }
+ preRoot = root
+ srcTrie, _ = NewSecure(TrieID(root), srcDb)
+
+ syncWith(t, srcTrie.Hash(), diskdb, srcDb)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff)
+
+ // Revert added modifications from the src trie, to see if dest trie can still
+ // sync with it (overwrite reverted states)
+ var reverted = make(map[string][]byte)
+ for k := range diff {
+ srcTrie.Delete([]byte(k))
+ reverted[k] = nil
+ }
+ for k := range srcData {
+ val := randBytes(32)
+ srcTrie.Update([]byte(k), val)
+ reverted[k] = val
+ }
+ root, nodes, _ = srcTrie.Commit(false)
+ if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
+ panic(err)
+ }
+ if err := srcDb.Commit(root, false); err != nil {
+ panic(err)
+ }
+ srcTrie, _ = NewSecure(TrieID(root), srcDb)
+
+ syncWith(t, srcTrie.Hash(), diskdb, srcDb)
+ checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted)
+}
diff --git a/trie/tracer.go b/trie/tracer.go
index cd5ebb85a2..56238713f7 100644
--- a/trie/tracer.go
+++ b/trie/tracer.go
@@ -16,6 +16,11 @@
package trie
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+)
+
// tracer tracks the changes of trie nodes. During the trie operations,
// some nodes can be deleted from the trie, while these deleted nodes
// won't be captured by trie.Hasher or trie.Commiter. Thus, these deleted
@@ -110,7 +115,7 @@ func (t *tracer) copy() *tracer {
}
// markDeletions puts all tracked deletions into the provided nodeset.
-func (t *tracer) markDeletions(set *NodeSet) {
+func (t *tracer) markDeletions(set *trienode.NodeSet) {
for path := range t.deletes {
// It's possible a few deleted nodes were embedded
// in their parent before, the deletions can be no
@@ -119,6 +124,6 @@ func (t *tracer) markDeletions(set *NodeSet) {
if !ok {
continue
}
- set.addNode([]byte(path), &nodeWithPrev{&memoryNode{}, prev})
+ set.AddNode([]byte(path), trienode.NewWithPrev(common.Hash{}, nil, prev))
}
}
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
index f8511a5e67..2421d88202 100644
--- a/trie/tracer_test.go
+++ b/trie/tracer_test.go
@@ -22,6 +22,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -69,7 +71,7 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
insertSet := copySet(trie.tracer.inserts) // copy before commit
deleteSet := copySet(trie.tracer.deletes) // copy before commit
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
seen := setKeys(iterNodes(db, root))
if !compareSet(insertSet, seen) {
@@ -135,7 +137,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -143,13 +145,14 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
}
// Update trie
+ parent := root
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
for _, val := range vals {
trie.Update([]byte(val.k), randBytes(32))
}
root, nodes, _ = trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -157,6 +160,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
}
// Add more new nodes
+ parent = root
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
var keys []string
@@ -166,7 +170,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.Update(key, randBytes(32))
}
root, nodes, _ = trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -174,13 +178,14 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
}
// Partial deletions
+ parent = root
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
for _, key := range keys {
trie.Update([]byte(key), nil)
}
root, nodes, _ = trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -188,13 +193,14 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
}
// Delete all
+ parent = root
trie, _ = New(TrieID(root), db)
orig = trie.Copy()
for _, val := range vals {
trie.Update([]byte(val.k), nil)
}
root, nodes, _ = trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -213,7 +219,7 @@ func TestAccessListLeak(t *testing.T) {
trie.Update([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
var cases = []struct {
op func(tr *Trie)
@@ -263,15 +269,16 @@ func TestTinyTree(t *testing.T) {
trie.Update([]byte(val.k), randBytes(32))
}
root, set, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(set))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set))
+ parent := root
trie, _ = New(TrieID(root), db)
orig := trie.Copy()
for _, val := range tiny {
trie.Update([]byte(val.k), []byte(val.v))
}
root, set, _ = trie.Commit(false)
- db.Update(NewWithNodeSet(set))
+ db.Update(root, parent, trienode.NewWithNodeSet(set))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, set); err != nil {
diff --git a/trie/trie.go b/trie/trie.go
index bbfb0b662f..ae00e542e5 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -582,9 +583,9 @@ func (t *Trie) Hash() common.Hash {
// The returned nodeset can be nil if the trie is clean(nothing to commit).
// Once the trie is committed, it's not usable anymore. A new trie must
// be created with new root and updated trie database for following usage
-func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) {
+func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
defer t.tracer.reset()
- nodes := NewNodeSet(t.owner)
+ nodes := trienode.NewNodeSet(t.owner)
t.tracer.markDeletions(nodes)
if t.root == nil {
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
index 1f3a2b8982..58a9f7ed86 100644
--- a/trie/trie_reader.go
+++ b/trie/trie_reader.go
@@ -32,9 +32,9 @@ type Reader interface {
// NodeReader wraps all the necessary functions for accessing trie node.
type NodeReader interface {
- // GetReader returns a reader for accessing all trie nodes with provided
+ // Reader returns a reader for accessing all trie nodes with provided
// state root. Nil is returned in case the state is not available.
- GetReader(root common.Hash) Reader
+ Reader(root common.Hash) Reader
}
// trieReader is a wrapper of the underlying node reader. It's not safe
@@ -47,7 +47,7 @@ type trieReader struct {
// newTrieReader initializes the trie reader with the given node reader.
func newTrieReader(stateRoot, owner common.Hash, db NodeReader) (*trieReader, error) {
- reader := db.GetReader(stateRoot)
+ reader := db.Reader(stateRoot)
if reader == nil {
return nil, fmt.Errorf("state not found #%x", stateRoot)
}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 499f0574df..d71ea35343 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/leveldb"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
)
@@ -80,20 +81,24 @@ func TestMissingRoot(t *testing.T) {
}
}
-func TestMissingNodeDisk(t *testing.T) { testMissingNode(t, false) }
-func TestMissingNodeMemonly(t *testing.T) { testMissingNode(t, true) }
+func TestMissingNode(t *testing.T) {
+ testMissingNode(t, false, rawdb.HashScheme)
+ //testMissingNode(t, false, rawdb.PathScheme)
+ testMissingNode(t, true, rawdb.HashScheme)
+ //testMissingNode(t, true, rawdb.PathScheme)
+}
-func testMissingNode(t *testing.T, memonly bool) {
+func testMissingNode(t *testing.T, memonly bool, scheme string) {
diskdb := rawdb.NewMemoryDatabase()
- triedb := NewDatabase(diskdb)
+ triedb := newTestDatabase(diskdb, scheme)
trie := NewEmpty(triedb)
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
root, nodes, _ := trie.Commit(false)
- triedb.Update(NewWithNodeSet(nodes))
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly {
- triedb.Commit(root, true, nil)
+ triedb.Commit(root, true)
}
trie, _ = New(TrieID(root), triedb)
@@ -122,34 +127,38 @@ func testMissingNode(t *testing.T, memonly bool) {
t.Errorf("Unexpected error: %v", err)
}
- hash := common.HexToHash("0xe1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9")
+ var (
+ path []byte
+ hash = common.HexToHash("0xe1d943cc8f061a0c0b98162830b970395ac9315654824bf21b73b891365262f9")
+ )
+ for p, n := range nodes.Nodes {
+ if n.Hash == hash {
+ path = common.CopyBytes([]byte(p))
+ break
+ }
+ }
if memonly {
- delete(triedb.dirties, hash)
+ trie.reader.banned = map[string]struct{}{string(path): {}}
} else {
- diskdb.Delete(hash[:])
+ rawdb.DeleteTrieNode(diskdb, common.Hash{}, path, hash, scheme)
}
- trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("120000"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("120099"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(TrieID(root), triedb)
_, err = trie.TryGet([]byte("123456"))
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
- trie, _ = New(TrieID(root), triedb)
err = trie.TryUpdate([]byte("120099"), []byte("zxcv"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
}
- trie, _ = New(TrieID(root), triedb)
err = trie.TryDelete([]byte("123456"))
if _, ok := err.(*MissingNodeError); !ok {
t.Errorf("Wrong error: %v", err)
@@ -204,7 +213,7 @@ func TestGet(t *testing.T) {
return
}
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
}
}
@@ -261,8 +270,8 @@ func TestEmptyValues(t *testing.T) {
}
func TestReplication(t *testing.T) {
- triedb := NewDatabase(rawdb.NewMemoryDatabase())
- trie := NewEmpty(triedb)
+ db := NewDatabase(rawdb.NewMemoryDatabase())
+ trie := NewEmpty(db)
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -275,16 +284,16 @@ func TestReplication(t *testing.T) {
for _, val := range vals {
updateString(trie, val.k, val.v)
}
- exp, nodes, err := trie.Commit(false)
+ root, nodes, err := trie.Commit(false)
if err != nil {
t.Fatalf("commit error: %v", err)
}
- triedb.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// create a new trie on top of the database and check that lookups work.
- trie2, err := New(TrieID(exp), triedb)
+ trie2, err := New(TrieID(root), db)
if err != nil {
- t.Fatalf("can't recreate trie at %x: %v", exp, err)
+ t.Fatalf("can't recreate trie at %x: %v", root, err)
}
for _, kv := range vals {
if string(getString(trie2, kv.k)) != kv.v {
@@ -295,16 +304,16 @@ func TestReplication(t *testing.T) {
if err != nil {
t.Fatalf("commit error: %v", err)
}
- if hash != exp {
- t.Errorf("root failure. expected %x got %x", exp, hash)
+ if hash != root {
+ t.Errorf("root failure. expected %x got %x", root, hash)
}
// recreate the trie after commit
if nodes != nil {
- triedb.Update(NewWithNodeSet(nodes))
+ db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
}
- trie2, err = New(TrieID(hash), triedb)
+ trie2, err = New(TrieID(hash), db)
if err != nil {
- t.Fatalf("can't recreate trie at %x: %v", exp, err)
+ t.Fatalf("can't recreate trie at %x: %v", root, err)
}
// perform some insertions on the new trie.
@@ -322,8 +331,8 @@ func TestReplication(t *testing.T) {
for _, val := range vals2 {
updateString(trie2, val.k, val.v)
}
- if hash := trie2.Hash(); hash != exp {
- t.Errorf("root failure. expected %x got %x", exp, hash)
+ if trie2.Hash() != hash {
+ t.Errorf("root failure. expected %x got %x", hash, hash)
}
}
@@ -421,42 +430,42 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
}
// verifyAccessList verifies the access list of the new trie against the old trie.
-func verifyAccessList(old *Trie, new *Trie, set *NodeSet) error {
+func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
deletes, inserts, updates := diffTries(old, new)
// Check insertion set
for path := range inserts {
- n, ok := set.nodes[path]
- if !ok || n.isDeleted() {
+ n, ok := set.Nodes[path]
+ if !ok || n.IsDeleted() {
return errors.New("expect new node")
}
- if len(n.prev) > 0 {
+ if len(n.Prev) > 0 {
return errors.New("unexpected origin value")
}
}
// Check deletion set
for path, blob := range deletes {
- n, ok := set.nodes[path]
- if !ok || !n.isDeleted() {
+ n, ok := set.Nodes[path]
+ if !ok || !n.IsDeleted() {
return errors.New("expect deleted node")
}
- if len(n.prev) == 0 {
+ if len(n.Prev) == 0 {
return errors.New("expect origin value")
}
- if !bytes.Equal(n.prev, blob) {
+ if !bytes.Equal(n.Prev, blob) {
return errors.New("invalid origin value")
}
}
// Check update set
for path, blob := range updates {
- n, ok := set.nodes[path]
- if !ok || n.isDeleted() {
+ n, ok := set.Nodes[path]
+ if !ok || n.IsDeleted() {
return errors.New("expect updated node")
}
- if len(n.prev) == 0 {
+ if len(n.Prev) == 0 {
return errors.New("expect origin value")
}
- if !bytes.Equal(n.prev, blob) {
+ if !bytes.Equal(n.Prev, blob) {
return errors.New("invalid origin value")
}
}
@@ -464,8 +473,13 @@ func verifyAccessList(old *Trie, new *Trie, set *NodeSet) error {
}
func runRandTest(rt randTest) bool {
+ var scheme = rawdb.HashScheme
+ //if rand.Intn(2) == 0 {
+ // scheme = rawdb.PathScheme
+ //}
var (
- triedb = NewDatabase(rawdb.NewMemoryDatabase())
+ origin = types.EmptyRootHash
+ triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme)
tr = NewEmpty(triedb)
origTrie = NewEmpty(triedb)
values = make(map[string]string) // tracks content of the trie
@@ -512,7 +526,7 @@ func runRandTest(rt randTest) bool {
return false
}
if nodes != nil {
- triedb.Update(NewWithNodeSet(nodes))
+ triedb.Update(root, origin, trienode.NewWithNodeSet(nodes))
}
newtr, err := New(TrieID(root), triedb)
if err != nil {
@@ -531,6 +545,7 @@ func runRandTest(rt randTest) bool {
tr.tracer = newTracer()
tr.resolveAndTrack(root.Bytes(), nil)
origTrie = tr.Copy()
+ origin = root
case opItercheckhash:
checktr := NewEmpty(triedb)
@@ -787,42 +802,30 @@ func (b *spongeBatch) Replay(w ethdb.KeyValueWriter) error { return nil }
// to check whether changes to the trie modifies the write order or data in any way.
func TestCommitSequence(t *testing.T) {
for i, tc := range []struct {
- count int
- expWriteSeqHash []byte
- expCallbackSeqHash []byte
+ count int
+ expWriteSeqHash []byte
}{
- {20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066"),
- common.FromHex("ff00f91ac05df53b82d7f178d77ada54fd0dca64526f537034a5dbe41b17df2a")},
- {200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e"),
- common.FromHex("f3cd509064c8d319bbdd1c68f511850a902ad275e6ed5bea11547e23d492a926")},
- {2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7"),
- common.FromHex("ff795ea898ba1e4cfed4a33b4cf5535a347a02cf931f88d88719faf810f9a1c9")},
+ {20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066")},
+ {200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e")},
+ {2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7")},
} {
addresses, accounts := makeAccounts(tc.count)
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
db := NewDatabase(rawdb.NewDatabase(s))
trie := NewEmpty(db)
- // Another sponge is used to check the callback-sequence
- callbackSponge := sha3.NewLegacyKeccak256()
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
- db.Commit(root, false, func(c common.Hash) {
- // And spongify the callback-order
- callbackSponge.Write(c[:])
- })
+ db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
}
- if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) {
- t.Errorf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp)
- }
}
}
@@ -830,24 +833,18 @@ func TestCommitSequence(t *testing.T) {
// but uses random blobs instead of 'accounts'
func TestCommitSequenceRandomBlobs(t *testing.T) {
for i, tc := range []struct {
- count int
- expWriteSeqHash []byte
- expCallbackSeqHash []byte
+ count int
+ expWriteSeqHash []byte
}{
- {20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc"),
- common.FromHex("450238d73bc36dc6cc6f926987e5428535e64be403877c4560e238a52749ba24")},
- {200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554"),
- common.FromHex("0ace0b03d6cb8c0b82f6289ef5b1a1838306b455a62dafc63cada8e2924f2550")},
- {2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424"),
- common.FromHex("117d30dafaa62a1eed498c3dfd70982b377ba2b46dd3e725ed6120c80829e518")},
+ {20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc")},
+ {200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554")},
+ {2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424")},
} {
prng := rand.New(rand.NewSource(int64(i)))
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
db := NewDatabase(rawdb.NewDatabase(s))
trie := NewEmpty(db)
- // Another sponge is used to check the callback-sequence
- callbackSponge := sha3.NewLegacyKeccak256()
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
key := make([]byte, 32)
@@ -864,18 +861,12 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
- db.Commit(root, false, func(c common.Hash) {
- // And spongify the callback-order
- callbackSponge.Write(c[:])
- })
+ db.Commit(root, false)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
}
- if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) {
- t.Fatalf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp)
- }
}
}
@@ -910,9 +901,9 @@ func TestCommitSequenceStackTrie(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
- db.Commit(root, false, nil)
+ db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
if err != nil {
@@ -959,9 +950,9 @@ func TestCommitSequenceSmallRoot(t *testing.T) {
stTrie.TryUpdate(key, []byte{0x1})
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(NewWithNodeSet(nodes))
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
- db.Commit(root, false, nil)
+ db.Commit(root, false)
// And flush stacktrie -> disk
stRoot, err := stTrie.Commit()
if err != nil {
@@ -1130,8 +1121,8 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts []
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
h := trie.Hash()
- _, nodes, _ := trie.Commit(false)
- triedb.Update(NewWithNodeSet(nodes))
+ root, nodes, _ := trie.Commit(false)
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
b.StartTimer()
triedb.Dereference(h)
b.StopTimer()
diff --git a/trie/database.go b/trie/triedb/hashdb/database.go
similarity index 82%
rename from trie/database.go
rename to trie/triedb/hashdb/database.go
index bd9d97d50b..4181a7ebfc 100644
--- a/trie/database.go
+++ b/trie/triedb/hashdb/database.go
@@ -14,12 +14,11 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package trie
+package hashdb
import (
"errors"
"reflect"
- "runtime"
"sync"
"time"
@@ -31,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
)
var (
@@ -57,10 +57,10 @@ var (
memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)
-// childResolver defines the required method to decode the provided
+// ChildResolver defines the required method to decode the provided
// trie node and iterate the children on top.
-type childResolver interface {
- forEach(node []byte, onChild func(common.Hash))
+type ChildResolver interface {
+ ForEach(node []byte, onChild func(common.Hash))
}
// Database is an intermediate write layer between the trie data structures and
@@ -73,7 +73,7 @@ type childResolver interface {
// servers even while the trie is executing expensive garbage collection.
type Database struct {
diskdb ethdb.Database // Persistent storage for matured trie nodes
- resolver childResolver // Resolver for trie node children
+ resolver ChildResolver // Resolver for trie node children
cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs
dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
@@ -90,7 +90,6 @@ type Database struct {
dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. metadata)
childrenSize common.StorageSize // Storage size of the external children tracking
- preimages *preimageStore // Store for caching preimages of trie nodes
lock sync.RWMutex
}
@@ -112,11 +111,11 @@ var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())
// forChildren invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
-func (n *cachedNode) forChildren(resolver childResolver, onChild func(hash common.Hash)) {
+func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash common.Hash)) {
for child := range n.external {
onChild(child)
}
- resolver.forEach(n.node, onChild)
+ resolver.ForEach(n.node, onChild)
}
// Config defines all necessary options for database.
@@ -126,37 +125,14 @@ type Config struct {
Preimages bool // Flag whether the preimage of trie key is recorded
}
-// NewDatabase creates a new trie database to store ephemeral trie content before
-// its written out to disk or garbage collected. No read cache is created, so all
-// data retrievals will hit the underlying disk database.
-// Using ethdb.Database which covers KeyValueStore and Freezer Interfaces.
-func NewDatabase(diskdb ethdb.Database) *Database {
- return NewDatabaseWithConfig(diskdb, nil)
-}
-
-// NewDatabaseWithConfig creates a new trie database to store ephemeral trie content
-// before its written out to disk or garbage collected. It also acts as a read cache
-// for nodes loaded from disk.
-func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
- var cleans *fastcache.Cache
- if config != nil && config.Cache > 0 {
- if config.Journal == "" {
- cleans = fastcache.New(config.Cache * 1024 * 1024)
- } else {
- cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
- }
- }
- var preimage *preimageStore
- if config != nil && config.Preimages {
- preimage = newPreimageStore(diskdb)
- }
+// New initializes the hash-based node database.
+func New(diskdb ethdb.Database, cleans *fastcache.Cache, resolver ChildResolver) *Database {
db := &Database{
- diskdb: diskdb,
- resolver: mptResolver{},
- cleans: cleans,
- dirties: make(map[common.Hash]*cachedNode),
- preimages: preimage,
+ diskdb: diskdb,
+ resolver: resolver,
+ cleans: cleans,
+ dirties: make(map[common.Hash]*cachedNode),
}
return db
}
@@ -376,12 +352,6 @@ func (db *Database) Cap(limit common.StorageSize) error {
size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
size += db.childrenSize
- // If the preimage cache got large enough, push to disk. If it's still small
- // leave for later to deduplicate writes.
- if db.preimages != nil {
- db.preimages.commit(false)
- }
-
// Keep committing nodes from the flush-list until we're below allowance
oldest := db.oldest
for size > limit && oldest != (common.Hash{}) {
@@ -456,10 +426,6 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
start := time.Now()
batch := db.diskdb.NewBatch()
- // Move all of the accumulated preimages into a write batch
- if db.preimages != nil {
- db.preimages.commit(true)
- }
// Move the trie itself into the batch, flushing if enough data is accumulated
nodes, storage := len(db.dirties), db.dirtiesSize
@@ -588,9 +554,23 @@ func (c *cleaner) Delete(key []byte) error {
panic("not implemented")
}
-// Update inserts the dirty nodes in the provided nodeset into database and
-// link the account trie with multiple storage tries if necessary.
-func (db *Database) Update(nodes *MergedNodeSet) error {
+// Initialized returns an indicator if state data is already initialized
+// in hash-based scheme by checking the presence of genesis state.
+func (db *Database) Initialized(genesisRoot common.Hash) bool {
+ return rawdb.HasLegacyTrieNode(db.diskdb, genesisRoot)
+}
+
+// Update inserts the dirty nodes in provided nodeset into database and link the
+// account trie with multiple storage tries if necessary.
+//
+// root and parent are used for path-based only
+func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+ // Ensure the parent state is present and signal a warning if not.
+ if parent != types.EmptyRootHash {
+ if blob, _ := db.Node(parent); len(blob) == 0 {
+ log.Error("parent state is not present")
+ }
+ }
db.lock.Lock()
defer db.lock.Unlock()
// Insert dirty nodes into the database. In the same tree, it must be
@@ -600,44 +580,47 @@ func (db *Database) Update(nodes *MergedNodeSet) error {
// Note, the storage tries must be flushed before the account trie to
// retain the invariant that children go into the dirty cache first.
var order []common.Hash
- for owner := range nodes.sets {
+ for owner := range nodes.Sets {
if owner == (common.Hash{}) {
continue
}
order = append(order, owner)
}
- if _, ok := nodes.sets[common.Hash{}]; ok {
+ if _, ok := nodes.Sets[common.Hash{}]; ok {
order = append(order, common.Hash{})
}
for _, owner := range order {
- subset := nodes.sets[owner]
- subset.forEachWithOrder(func(path string, n *memoryNode) {
- if n.isDeleted() {
+ subset := nodes.Sets[owner]
+ subset.ForEachWithOrder(func(path string, n *trienode.Node) {
+ if n.IsDeleted() {
return // ignore deletion
}
- db.insert(n.hash, n.node)
+ db.insert(n.Hash, n.Blob)
})
}
// Link up the account trie and storage trie if the node points
// to an account trie leaf.
- if set, present := nodes.sets[common.Hash{}]; present {
- for _, leaf := range set.leaves {
+ if set, present := nodes.Sets[common.Hash{}]; present {
+ for _, leaf := range set.Leaves {
// Looping node leaf, then reference the leaf node to the root node
var account types.StateAccount
- if err := rlp.DecodeBytes(leaf.blob, &account); err != nil {
+ if err := rlp.DecodeBytes(leaf.Blob, &account); err != nil {
return err
}
- if account.Root != emptyRoot {
- db.reference(account.Root, leaf.parent)
+ if account.Root != types.EmptyRootHash {
+ db.reference(account.Root, leaf.Parent)
}
}
}
return nil
}
+// Close closes the trie database and releases all held resources.
+func (db *Database) Close() error { return nil }
+
// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
-func (db *Database) Size() (common.StorageSize, common.StorageSize) {
+func (db *Database) Size() common.StorageSize {
db.lock.RLock()
defer db.lock.RUnlock()
@@ -645,76 +628,27 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) {
// the total memory consumption, the maintenance metadata is also needed to be
// counted.
var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize)
- var preimageSize common.StorageSize
- if db.preimages != nil {
- preimageSize = db.preimages.size()
- }
- return db.dirtiesSize + db.childrenSize + metadataSize, preimageSize
+ return db.dirtiesSize + db.childrenSize + metadataSize
}
-// GetReader retrieves a node reader belonging to the given state root.
-func (db *Database) GetReader(root common.Hash) Reader {
- return newHashReader(db)
+// Scheme returns the node scheme used in the database.
+func (db *Database) Scheme() string {
+ return rawdb.HashScheme
}
-// hashReader is reader of hashDatabase which implements the Reader interface.
-type hashReader struct {
- db *Database
+// Reader retrieves a node reader belonging to the given state root.
+func (db *Database) Reader(root common.Hash) *reader {
+ return &reader{db: db}
}
-// newHashReader initializes the hash reader.
-func newHashReader(db *Database) *hashReader {
- return &hashReader{db: db}
+// reader is a state reader of Database which implements the Reader interface.
+type reader struct {
+ db *Database
}
-// Node retrieves the RLP-encoded trie node blob with the given node hash.
+// Node retrieves the trie node with the given node hash.
// No error will be returned if the node is not found.
-func (reader *hashReader) Node(_ common.Hash, _ []byte, hash common.Hash) ([]byte, error) {
+func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
blob, _ := reader.db.Node(hash)
return blob, nil
}
-
-// saveCache saves clean state cache to given directory path
-// using specified CPU cores.
-func (db *Database) saveCache(dir string, threads int) error {
- if db.cleans == nil {
- return nil
- }
- log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads)
-
- start := time.Now()
- err := db.cleans.SaveToFileConcurrent(dir, threads)
- if err != nil {
- log.Error("Failed to persist clean trie cache", "error", err)
- return err
- }
- log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start)))
- return nil
-}
-
-// SaveCache atomically saves fast cache data to the given dir using all
-// available CPU cores.
-func (db *Database) SaveCache(dir string) error {
- return db.saveCache(dir, runtime.GOMAXPROCS(0))
-}
-
-// SaveCachePeriodically atomically saves fast cache data to the given dir with
-// the specified interval. All dump operation will only use a single CPU core.
-func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- db.saveCache(dir, 1)
- case <-stopCh:
- return
- }
- }
-}
-
-// Scheme returns the node scheme used in the database. Right now, we only support hash scheme.
-func (db *Database) Scheme() string {
- return rawdb.HashScheme
-}
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
new file mode 100644
index 0000000000..8152eab6c0
--- /dev/null
+++ b/trie/trienode/node.go
@@ -0,0 +1,197 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trienode
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
// Node is a wrapper which contains the encoded blob of the trie node and its
// unique hash identifier. It is general enough that can be used to represent
// trie nodes corresponding to different trie implementations.
type Node struct {
	Hash common.Hash // Node hash, empty for deleted node
	Blob []byte // Encoded node blob, nil for the deleted node
}

// Size returns the total memory size used by this node. The hash is always
// accounted for, even when the blob is nil (deleted node).
func (n *Node) Size() int {
	return len(n.Blob) + common.HashLength
}

// IsDeleted returns the indicator if the node is marked as deleted, which
// is encoded as a zero hash.
func (n *Node) IsDeleted() bool {
	return n.Hash == (common.Hash{})
}
+
// WithPrev wraps the Node with the previous node value attached. The previous
// value lets callers inspect what the node looked like before this change
// (used e.g. to verify commit access lists).
type WithPrev struct {
	*Node
	Prev []byte // Encoded original value, nil means it's non-existent
}

// Unwrap returns the internal Node object, dropping the previous value.
func (n *WithPrev) Unwrap() *Node {
	return n.Node
}

// Size returns the total memory size used by this node. It overloads
// the function in Node by counting the size of previous value as well.
func (n *WithPrev) Size() int {
	return n.Node.Size() + len(n.Prev)
}
+
+// New constructs a node with provided node information.
+func New(hash common.Hash, blob []byte) *Node {
+ return &Node{Hash: hash, Blob: blob}
+}
+
+// NewWithPrev constructs a node with provided node information.
+func NewWithPrev(hash common.Hash, blob []byte, prev []byte) *WithPrev {
+ return &WithPrev{
+ Node: New(hash, blob),
+ Prev: prev,
+ }
+}
+
// leaf represents a trie leaf node
type leaf struct {
	Blob []byte // raw blob of leaf
	Parent common.Hash // the hash of parent node
}

// NodeSet contains a set of nodes collected during the commit operation.
// Each node is keyed by path. It's not thread-safe to use.
type NodeSet struct {
	Owner common.Hash // Identifier of the trie: zero for the account trie, account hash for storage tries
	Leaves []*leaf // Collected leaf nodes, used to link account and storage tries
	Nodes map[string]*WithPrev // Dirty nodes keyed by node path within the trie
	updates int // the count of updated and inserted nodes
	deletes int // the count of deleted nodes
}
+
+// NewNodeSet initializes a node set. The owner is zero for the account trie and
+// the owning account address hash for storage tries.
+func NewNodeSet(owner common.Hash) *NodeSet {
+ return &NodeSet{
+ Owner: owner,
+ Nodes: make(map[string]*WithPrev),
+ }
+}
+
+// ForEachWithOrder iterates the nodes with the order from bottom to top,
+// right to left, nodes with the longest path will be iterated first.
+func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) {
+ var paths sort.StringSlice
+ for path := range set.Nodes {
+ paths = append(paths, path)
+ }
+ // Bottom-up, longest path first
+ sort.Sort(sort.Reverse(paths))
+ for _, path := range paths {
+ callback(path, set.Nodes[path].Unwrap())
+ }
+}
+
+// AddNode adds the provided node into set.
+func (set *NodeSet) AddNode(path []byte, n *WithPrev) {
+ if n.IsDeleted() {
+ set.deletes += 1
+ } else {
+ set.updates += 1
+ }
+ set.Nodes[string(path)] = n
+}
+
+// AddLeaf adds the provided leaf node into set. TODO(rjl493456442) how can
+// we get rid of it?
+func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) {
+ set.Leaves = append(set.Leaves, &leaf{Blob: blob, Parent: parent})
+}
+
+// Size returns the number of dirty nodes in set.
+func (set *NodeSet) Size() (int, int) {
+ return set.updates, set.deletes
+}
+
+// Hashes returns the hashes of all updated nodes. TODO(rjl493456442) how can
+// we get rid of it?
+func (set *NodeSet) Hashes() []common.Hash {
+ var ret []common.Hash
+ for _, node := range set.Nodes {
+ ret = append(ret, node.Hash)
+ }
+ return ret
+}
+
+// Summary returns a string-representation of the NodeSet.
+func (set *NodeSet) Summary() string {
+ var out = new(strings.Builder)
+ fmt.Fprintf(out, "nodeset owner: %v\n", set.Owner)
+ if set.Nodes != nil {
+ for path, n := range set.Nodes {
+ // Deletion
+ if n.IsDeleted() {
+ fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.Prev)
+ continue
+ }
+ // Insertion
+ if len(n.Prev) == 0 {
+ fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.Hash)
+ continue
+ }
+ // Update
+ fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.Hash, n.Prev)
+ }
+ }
+ for _, n := range set.Leaves {
+ fmt.Fprintf(out, "[leaf]: %v\n", n)
+ }
+ return out.String()
+}
+
// MergedNodeSet represents a merged node set for a group of tries, keyed by
// the owner of each per-trie set.
type MergedNodeSet struct {
	Sets map[common.Hash]*NodeSet
}

// NewMergedNodeSet initializes an empty merged set.
func NewMergedNodeSet() *MergedNodeSet {
	return &MergedNodeSet{Sets: make(map[common.Hash]*NodeSet)}
}

// NewWithNodeSet constructs a merged nodeset with the provided single set.
// The Merge error is ignored: a freshly created merged set cannot already
// contain the owner.
func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
	merged := NewMergedNodeSet()
	merged.Merge(set)
	return merged
}
+
+// Merge merges the provided dirty nodes of a trie into the set. The assumption
+// is held that no duplicated set belonging to the same trie will be merged twice.
+func (set *MergedNodeSet) Merge(other *NodeSet) error {
+ _, present := set.Sets[other.Owner]
+ if present {
+ return fmt.Errorf("duplicate trie for owner %#x", other.Owner)
+ }
+ set.Sets[other.Owner] = other
+ return nil
+}