Merge pull request #1801 from onflow/fxamacker/tidy-mtrie
Update mtrie with some minor fixes and small improvements
fxamacker authored Jan 13, 2022
2 parents 8fcec1e + d432c7c commit defb54a
Showing 12 changed files with 62 additions and 78 deletions.
2 changes: 1 addition & 1 deletion ledger/common/encoding/encoding.go
@@ -70,7 +70,7 @@ func CheckVersion(rawInput []byte) (rest []byte, version uint16, err error) {
}

// CheckType extracts encoding byte from a raw encoded message
// checks it against the supported versions and returns the rest of rawInput (excluding encDecVersion bytes)
// checks it against expected type and returns the rest of rawInput (excluding type byte)
func CheckType(rawInput []byte, expectedType uint8) (rest []byte, err error) {
t, r, err := utils.ReadUint8(rawInput)
if err != nil {
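For context, the visible fragment suggests CheckType reads one byte and compares it against expectedType. A minimal sketch of how such a function could continue; only the first three lines of the body appear in the diff, so the error paths and messages below are assumptions, not the repository's code:

```go
import (
	"fmt"

	"github.com/onflow/flow-go/ledger/common/utils"
)

// CheckType (sketch): read the type byte, compare it against the
// expected type, and return the remaining input on success.
func CheckType(rawInput []byte, expectedType uint8) (rest []byte, err error) {
	t, r, err := utils.ReadUint8(rawInput)
	if err != nil {
		return rest, fmt.Errorf("error reading type: %w", err)
	}
	if t != expectedType {
		return rest, fmt.Errorf("unexpected type: got %d, want %d", t, expectedType)
	}
	return r, nil
}
```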
2 changes: 1 addition & 1 deletion ledger/common/proof/proof.go
@@ -18,7 +18,7 @@ func VerifyTrieProof(p *ledger.TrieProof, expectedState ledger.State) bool {
}
// We start with the leaf and hash our way upwards towards the root
proofIndex := len(p.Interims) - 1 // the index of the last non-default value furthest down the tree (-1 if there is none)
computed := ledger.ComputeCompactValue(hash.Hash(p.Path), p.Payload.Value, leafHeight) // we first compute the hash of the fully-expanded leaf (at height 0)
computed := ledger.ComputeCompactValue(hash.Hash(p.Path), p.Payload.Value, leafHeight) // we first compute the hash of the compact leaf (at height leafHeight)
for h := leafHeight + 1; h <= treeHeight; h++ { // then, we hash our way upwards until we hit the root (at height `treeHeight`)
// we are currently at a node n (initially the leaf). In this iteration, we want to compute the
// parent's hash. Here, h is the height of the parent, whose hash we want to compute.
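The loop being documented folds the compact-leaf hash upward: at each height it pairs the running hash with either a default sibling hash or the next interim hash from the proof, consumed from the back (matching `proofIndex := len(p.Interims) - 1` above). A standalone sketch of that fold, with SHA-256 standing in for the ledger's hasher; hashInterim, the defaults ladder, and the bit convention are illustrative assumptions, not the ledger package's API:

```go
package sketch

import "crypto/sha256"

type Hash = [32]byte

// hashInterim stands in for the ledger's interim-node hasher.
func hashInterim(left, right Hash) Hash {
	return sha256.Sum256(append(left[:], right[:]...))
}

// foldToRoot recomputes the root hash starting from a compact leaf at
// leafHeight. isRightChild(h) reports whether the node below height h is
// its parent's right child; useInterim(h) reports whether the sibling at
// that step is a non-default hash taken from the proof's interims.
func foldToRoot(computed Hash, leafHeight, treeHeight int, defaults, interims []Hash,
	isRightChild, useInterim func(h int) bool) Hash {
	idx := len(interims) - 1 // interims are consumed from the back
	for h := leafHeight + 1; h <= treeHeight; h++ {
		sibling := defaults[h-1]
		if useInterim(h) {
			sibling, idx = interims[idx], idx-1
		}
		if isRightChild(h) {
			computed = hashInterim(sibling, computed)
		} else {
			computed = hashInterim(computed, sibling)
		}
	}
	return computed
}
```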
1 change: 1 addition & 0 deletions ledger/common/utils/testutils.go
@@ -128,6 +128,7 @@ func ReadShortData(input []byte) (data []byte, rest []byte, err error) {
return nil, rest, err
}
data = rest[:size]
rest = rest[size:]
return
}

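The added `rest = rest[size:]` line is the actual fix: previously the returned remainder still began with the data bytes just read. A self-contained sketch of the corrected helper, assuming the 2-byte big-endian length prefix implied by the name ShortData (the real function uses the package's own read helpers):

```go
package sketch

import (
	"encoding/binary"
	"fmt"
)

// readShortData decodes a length-prefixed byte slice and returns the
// data together with the remaining, not-yet-consumed input.
func readShortData(input []byte) (data, rest []byte, err error) {
	if len(input) < 2 {
		return nil, input, fmt.Errorf("input too short for length prefix")
	}
	size := int(binary.BigEndian.Uint16(input))
	rest = input[2:]
	if len(rest) < size {
		return nil, rest, fmt.Errorf("not enough data: want %d, have %d", size, len(rest))
	}
	data = rest[:size]
	rest = rest[size:] // the fix: advance past the bytes just returned as data
	return data, rest, nil
}
```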
2 changes: 1 addition & 1 deletion ledger/complete/mtrie/flattener/encoding.go
@@ -124,7 +124,7 @@ func ReadStorableNode(reader io.Reader) (*StorableNode, error) {

// EncodeStorableTrie encodes StorableTrie
func EncodeStorableTrie(storableTrie *StorableTrie) []byte {
length := 8 + 2 + len(storableTrie.RootHash)
length := 2 + 8 + 2 + len(storableTrie.RootHash)
buf := make([]byte, 0, length)
// 2-bytes encoding version
buf = utils.AppendUint16(buf, encodingDecodingVersion)
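The corrected capacity adds the 2 bytes of the version prefix that the old value missed. Since `length` only seeds the slice capacity, the old code still produced correct output; it just paid for one extra allocation once appends outgrew the undersized buffer, which is why this counts as a minor fix. A sketch of the layout the new value accounts for (field meanings beyond the version prefix and RootHash are assumptions inferred from the 8-byte gap):

```go
package sketch

import "encoding/binary"

// encodeStorableTrie sketches the encoding: 2-byte version, 8-byte root
// node index, then a 2-byte length prefix followed by the root hash.
func encodeStorableTrie(version uint16, rootIndex uint64, rootHash []byte) []byte {
	buf := make([]byte, 0, 2+8+2+len(rootHash)) // exact capacity, no regrowth
	buf = binary.BigEndian.AppendUint16(buf, version)
	buf = binary.BigEndian.AppendUint64(buf, rootIndex)
	buf = binary.BigEndian.AppendUint16(buf, uint16(len(rootHash)))
	return append(buf, rootHash...)
}
```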
50 changes: 25 additions & 25 deletions ledger/complete/mtrie/forest_test.go
@@ -42,11 +42,11 @@ func TestTrieOperations(t *testing.T) {
retnt, err := forest.GetTrie(updatedTrie.RootHash())
require.NoError(t, err)
require.Equal(t, retnt.RootHash(), updatedTrie.RootHash())
require.Equal(t, forest.Size(), 2)
require.Equal(t, 2, forest.Size())

// Remove trie
forest.RemoveTrie(updatedTrie.RootHash())
require.Equal(t, forest.Size(), 1)
require.Equal(t, 1, forest.Size())
}

// TestTrieUpdate updates the empty trie with some values and verifies that the
@@ -104,8 +104,8 @@ func TestLeftEmptyInsert(t *testing.T) {

baseTrie, err := forest.GetTrie(baseRoot)
require.NoError(t, err)
require.Equal(t, baseTrie.MaxDepth(), uint16(2))
require.Equal(t, baseTrie.AllocatedRegCount(), uint64(2))
require.Equal(t, uint16(2), baseTrie.MaxDepth())
require.Equal(t, uint64(2), baseTrie.AllocatedRegCount())
fmt.Println("BASE TRIE:")
fmt.Println(baseTrie.String())

@@ -120,8 +120,8 @@ func TestLeftEmptyInsert(t *testing.T) {

updatedTrie, err := forest.GetTrie(updatedRoot)
require.NoError(t, err)
require.Equal(t, updatedTrie.MaxDepth(), uint16(2))
require.Equal(t, updatedTrie.AllocatedRegCount(), uint64(3))
require.Equal(t, uint16(2), updatedTrie.MaxDepth())
require.Equal(t, uint64(3), updatedTrie.AllocatedRegCount())
fmt.Println("UPDATED TRIE:")
fmt.Println(updatedTrie.String())
paths = []ledger.Path{p1, p2, p3}
@@ -134,7 +134,7 @@ func TestLeftEmptyInsert(t *testing.T) {
}
}

// TestLeftEmptyInsert tests inserting a new value into an empty sub-trie:
// TestRightEmptyInsert tests inserting a new value into an empty sub-trie:
// 1. we first construct a baseTrie holding a couple of values on the left branch [~]
// 2. we update a previously non-existent register on the right branch (X)
// We verify that values for _all_ paths in the updated Trie have correct payloads
@@ -164,8 +164,8 @@ func TestRightEmptyInsert(t *testing.T) {

baseTrie, err := forest.GetTrie(baseRoot)
require.NoError(t, err)
require.Equal(t, baseTrie.MaxDepth(), uint16(2))
require.Equal(t, baseTrie.AllocatedRegCount(), uint64(2))
require.Equal(t, uint16(2), baseTrie.MaxDepth())
require.Equal(t, uint64(2), baseTrie.AllocatedRegCount())
fmt.Println("BASE TRIE:")
fmt.Println(baseTrie.String())

@@ -181,8 +181,8 @@ func TestRightEmptyInsert(t *testing.T) {

updatedTrie, err := forest.GetTrie(updatedRoot)
require.NoError(t, err)
require.Equal(t, updatedTrie.MaxDepth(), uint16(2))
require.Equal(t, updatedTrie.AllocatedRegCount(), uint64(3))
require.Equal(t, uint16(2), updatedTrie.MaxDepth())
require.Equal(t, uint64(3), updatedTrie.AllocatedRegCount())
fmt.Println("UPDATED TRIE:")
fmt.Println(updatedTrie.String())

@@ -224,8 +224,8 @@ func TestExpansionInsert(t *testing.T) {

baseTrie, err := forest.GetTrie(baseRoot)
require.NoError(t, err)
require.Equal(t, baseTrie.MaxDepth(), uint16(0))
require.Equal(t, baseTrie.AllocatedRegCount(), uint64(1))
require.Equal(t, uint16(0), baseTrie.MaxDepth())
require.Equal(t, uint64(1), baseTrie.AllocatedRegCount())
fmt.Println("BASE TRIE:")
fmt.Println(baseTrie.String())

@@ -241,8 +241,8 @@ func TestExpansionInsert(t *testing.T) {

updatedTrie, err := forest.GetTrie(updatedRoot)
require.NoError(t, err)
require.Equal(t, updatedTrie.MaxDepth(), uint16(7))
require.Equal(t, updatedTrie.AllocatedRegCount(), uint64(2))
require.Equal(t, uint16(7), updatedTrie.MaxDepth())
require.Equal(t, uint64(2), updatedTrie.AllocatedRegCount())
fmt.Println("UPDATED TRIE:")
fmt.Println(updatedTrie.String())

@@ -293,12 +293,12 @@ func TestFullHouseInsert(t *testing.T) {

baseTrie, err := forest.GetTrie(baseRoot)
require.NoError(t, err)
require.Equal(t, baseTrie.MaxDepth(), uint16(2))
require.Equal(t, baseTrie.AllocatedRegCount(), uint64(3))
require.Equal(t, uint16(2), baseTrie.MaxDepth())
require.Equal(t, uint64(3), baseTrie.AllocatedRegCount())
fmt.Println("BASE TRIE:")
fmt.Println(baseTrie.String())

// we update value for path p1 and in addition add p3 that has the same prefix `10` as p0
// we update value for path p1 and in addition add p3 that has the same prefix `10` as p1
v1 = payloadBySlices([]byte{'X'}, []byte{'X'})

// path: 1010...
@@ -313,8 +313,8 @@ func TestFullHouseInsert(t *testing.T) {

updatedTrie, err := forest.GetTrie(updatedRoot)
require.NoError(t, err)
require.Equal(t, updatedTrie.MaxDepth(), uint16(3))
require.Equal(t, updatedTrie.AllocatedRegCount(), uint64(4))
require.Equal(t, uint16(3), updatedTrie.MaxDepth())
require.Equal(t, uint64(4), updatedTrie.AllocatedRegCount())
fmt.Println("UPDATED TRIE:")
fmt.Println(updatedTrie.String())

@@ -359,8 +359,8 @@ func TestLeafInsert(t *testing.T) {

updatedTrie, err := forest.GetTrie(updatedRoot)
require.NoError(t, err)
require.Equal(t, updatedTrie.MaxDepth(), uint16(256))
require.Equal(t, updatedTrie.AllocatedRegCount(), uint64(2))
require.Equal(t, uint16(256), updatedTrie.MaxDepth())
require.Equal(t, uint64(2), updatedTrie.AllocatedRegCount())
fmt.Println("TRIE:")
fmt.Println(updatedTrie.String())

@@ -495,14 +495,14 @@ func TestReadOrder(t *testing.T) {
read := &ledger.TrieRead{RootHash: testRoot, Paths: []ledger.Path{p1, p2}}
retPayloads, err := forest.Read(read)
require.NoError(t, err)
require.Equal(t, len(retPayloads), len(payloads))
require.Equal(t, len(read.Paths), len(retPayloads))
require.True(t, bytes.Equal(encoding.EncodePayload(retPayloads[0]), encoding.EncodePayload(payloads[0])))
require.True(t, bytes.Equal(encoding.EncodePayload(retPayloads[1]), encoding.EncodePayload(payloads[1])))

read = &ledger.TrieRead{RootHash: testRoot, Paths: []ledger.Path{p2, p1}}
retPayloads, err = forest.Read(read)
require.NoError(t, err)
require.Equal(t, len(retPayloads), len(payloads))
require.Equal(t, len(read.Paths), len(retPayloads))
require.True(t, bytes.Equal(encoding.EncodePayload(retPayloads[1]), encoding.EncodePayload(payloads[0])))
require.True(t, bytes.Equal(encoding.EncodePayload(retPayloads[0]), encoding.EncodePayload(payloads[1])))
}
@@ -570,7 +570,7 @@ func TestReadWithDuplicatedKeys(t *testing.T) {
read := &ledger.TrieRead{RootHash: updatedRoot, Paths: paths}
retPayloads, err := forest.Read(read)
require.NoError(t, err)
require.Equal(t, len(expectedPayloads), len(retPayloads))
require.Equal(t, len(read.Paths), len(retPayloads))
for i := range paths {
require.True(t, bytes.Equal(encoding.EncodePayload(retPayloads[i]), encoding.EncodePayload(expectedPayloads[i])))
}
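Most edits in this file only swap assertion operands: testify's require.Equal takes (t, expected, actual), and its failure output labels the two values accordingly, so putting the constant first keeps diagnostics truthful. The remaining edits compare the returned payload count against len(read.Paths), the quantity the read API actually promises. A minimal illustration of the operand-order convention:

```go
package sketch

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestArgumentOrder(t *testing.T) {
	got := 1 + 1
	// On failure testify prints "expected: ..." for the second argument
	// and "actual: ..." for the third, so the known value goes first.
	require.Equal(t, 2, got)
}
```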
42 changes: 22 additions & 20 deletions ledger/complete/mtrie/node/node_test.go
@@ -22,8 +22,7 @@ func Test_ProperLeaf(t *testing.T) {
require.True(t, n.VerifyCachedHash())
}

// Test_ProperLeaf verifies that the hash value of a compactified leaf (at height > 0) is computed correctly.
// Here, we test with 16bit keys. Hence, the max height of a compactified leaf can be 16.
// Test_CompactifiedLeaf verifies that the hash value of a compactified leaf (at height > 0) is computed correctly.
// We test the hash at the lowest-possible height (1), for the leaf to be still compactified,
// at an interim height (9) and the max possible height (256)
func Test_CompactifiedLeaf(t *testing.T) {
@@ -42,20 +41,23 @@ func Test_CompactifiedLeaf(t *testing.T) {
require.Equal(t, expectedRootHashHex, hashToString(n.Hash()))
}

// Test_InterimNode verifies that the hash value of an interim node without children is computed correctly.
// We test the hash at the lowest-possible height (0), at an interim height (9) and the max possible height (256)
// Test_InterimNodeWithoutChildren verifies that the hash value of an interim node without children is computed correctly.
// We test the hash at the lowest-possible height (0), at an interim height (9) and (16)
func Test_InterimNodeWithoutChildren(t *testing.T) {
n := node.NewInterimNode(0, nil, nil)
expectedRootHashHex := "18373b4b038cbbf37456c33941a7e346e752acd8fafa896933d4859002b62619"
require.Equal(t, expectedRootHashHex, hashToString(n.Hash()))
require.Equal(t, ledger.GetDefaultHashForHeight(0), n.Hash())

n = node.NewInterimNode(9, nil, nil)
expectedRootHashHex = "a37f98dbac56e315fbd4b9f9bc85fbd1b138ed4ae453b128c22c99401495af6d"
require.Equal(t, expectedRootHashHex, hashToString(n.Hash()))
require.Equal(t, ledger.GetDefaultHashForHeight(9), n.Hash())

n = node.NewInterimNode(16, nil, nil)
expectedRootHashHex = "6e24e2397f130d9d17bef32b19a77b8f5bcf03fb7e9e75fd89b8a455675d574a"
require.Equal(t, expectedRootHashHex, hashToString(n.Hash()))
require.Equal(t, ledger.GetDefaultHashForHeight(16), n.Hash())
}
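The added GetDefaultHashForHeight assertions lean on the standard sparse-Merkle-trie identity: an empty subtrie of height h hashes to the interim hash of two empty subtries of height h-1, which is exactly what an interim node with two nil children represents. A sketch of that ladder, with SHA-256 standing in for the ledger's hasher and an assumed height-0 seed:

```go
package sketch

import "crypto/sha256"

// defaultHashes[h] is the hash of a completely empty subtrie of height h.
func defaultHashes(maxHeight int) [][32]byte {
	d := make([][32]byte, maxHeight+1)
	d[0] = sha256.Sum256(nil) // assumed default for an empty leaf
	for h := 1; h <= maxHeight; h++ {
		// an empty subtrie hashes as two empty children one level down
		d[h] = sha256.Sum256(append(d[h-1][:], d[h-1][:]...))
	}
	return d
}
```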

// Test_InterimNodeWithOneChild verifies that the hash value of an interim node with
@@ -96,32 +98,33 @@ func Test_MaxDepth(t *testing.T) {

n1 := node.NewLeaf(path, payload, 0)
n2 := node.NewLeaf(path, payload, 0)
n3 := node.NewLeaf(path, payload, 0)
n3 := node.NewLeaf(path, payload, 1)

n4 := node.NewInterimNode(1, n1, n2)
n5 := node.NewInterimNode(1, n4, n3)
require.Equal(t, n5.MaxDepth(), uint16(2))
n5 := node.NewInterimNode(2, n4, n3)
require.Equal(t, uint16(2), n5.MaxDepth())
}
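The height adjustments in this test and its neighbors all restore the same structural invariant: an interim node at height h takes children at height h-1. With n3 now a compactified leaf at height 1, its parent n5 must be constructed at height 2. The shape being asserted on, inferred from the fixed constructor calls:

```
          n5 (height 2)
         /             \
    n4 (height 1)    n3 (height 1)
    /           \
n1 (height 0)  n2 (height 0)
```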

func Test_RegCount(t *testing.T) {
path := utils.PathByUint16(1)
payload := utils.LightPayload(2, 3)
n1 := node.NewLeaf(path, payload, 0)
n2 := node.NewLeaf(path, payload, 0)
n3 := node.NewLeaf(path, payload, 0)
n3 := node.NewLeaf(path, payload, 1)

n4 := node.NewInterimNode(1, n1, n2)
n5 := node.NewInterimNode(1, n4, n3)
require.Equal(t, n5.RegCount(), uint64(3))
n5 := node.NewInterimNode(2, n4, n3)
require.Equal(t, uint64(3), n5.RegCount())
}

func Test_AllPayloads(t *testing.T) {
path := utils.PathByUint16(1)
payload := utils.LightPayload(2, 3)
n1 := node.NewLeaf(path, payload, 0)
n2 := node.NewLeaf(path, payload, 0)
n3 := node.NewLeaf(path, payload, 0)
n3 := node.NewLeaf(path, payload, 1)
n4 := node.NewInterimNode(1, n1, n2)
n5 := node.NewInterimNode(1, n4, n3)
n5 := node.NewInterimNode(2, n4, n3)
require.Equal(t, 3, len(n5.AllPayloads()))
}

@@ -130,9 +133,9 @@ func Test_VerifyCachedHash(t *testing.T) {
payload := utils.LightPayload(2, 3)
n1 := node.NewLeaf(path, payload, 0)
n2 := node.NewLeaf(path, payload, 0)
n3 := node.NewLeaf(path, payload, 0)
n3 := node.NewLeaf(path, payload, 1)
n4 := node.NewInterimNode(1, n1, n2)
n5 := node.NewInterimNode(1, n4, n3)
n5 := node.NewInterimNode(2, n4, n3)
require.True(t, n5.VerifyCachedHash())
}

@@ -156,8 +159,7 @@ func Test_Compactify_EmptySubtrie(t *testing.T) {
})

t.Run("both children nil", func(t *testing.T) {
require.Nil(t, node.NewInterimCompactifiedNode(5, nil, n2))
require.Nil(t, node.NewInterimCompactifiedNode(5, n1, nil))
require.Nil(t, node.NewInterimCompactifiedNode(5, nil, nil))
})
}

@@ -236,7 +238,7 @@ func Test_Compactify_EmptyChild(t *testing.T) {
require.Equal(t, n3, nn5.LeftChild())
require.Nil(t, nn5.RightChild())
require.True(t, nn5.VerifyCachedHash())
require.Equal(t, nn5.Hash(), n5.Hash())
require.Equal(t, n5.Hash(), nn5.Hash())
require.Equal(t, uint16(2), nn5.MaxDepth())
require.Equal(t, uint64(2), nn5.RegCount())
})
@@ -260,7 +262,7 @@ func Test_Compactify_EmptyChild(t *testing.T) {
require.Nil(t, nn5.LeftChild())
require.Equal(t, n4, nn5.RightChild())
require.True(t, nn5.VerifyCachedHash())
require.Equal(t, nn5.Hash(), n5.Hash())
require.Equal(t, n5.Hash(), nn5.Hash())
require.Equal(t, uint16(2), nn5.MaxDepth())
require.Equal(t, uint64(2), nn5.RegCount())
})
@@ -302,7 +304,7 @@ func Test_Compactify_BothChildrenPopulated(t *testing.T) {
require.Equal(t, nn3, nn5.LeftChild())
require.Equal(t, n4, nn5.RightChild())
require.True(t, nn5.VerifyCachedHash())
require.Equal(t, nn5.Hash(), n5.Hash())
require.Equal(t, n5.Hash(), nn5.Hash())
require.Equal(t, uint16(2), nn5.MaxDepth())
require.Equal(t, uint64(3), nn5.RegCount())
}
Expand All @@ -324,7 +326,7 @@ func requireIsLeafWithHash(t *testing.T, node *node.Node, expectedHash hash.Hash
require.Nil(t, node.RightChild())
require.Equal(t, uint16(0), node.MaxDepth())
require.Equal(t, uint64(1), node.RegCount())
require.Equal(t, node.Hash(), expectedHash)
require.Equal(t, expectedHash, node.Hash())
require.True(t, node.VerifyCachedHash())
require.True(t, node.IsLeaf())
}
3 changes: 2 additions & 1 deletion ledger/complete/mtrie/trie/trie.go
@@ -247,6 +247,7 @@ func update(
// in the remaining code: the number of registers to update is strictly larger than 1:
// - either len(paths) > 1
// - or len(paths) == 1 and compactLeaf != nil
// - or len(paths) == 1 and parentNode != nil && !parentNode.IsLeaf()

// Split paths and payloads to recurse:
// lpaths contains all paths that have `0` at the partitionIndex
@@ -500,7 +501,7 @@ func (mt *MTrie) IsAValidTrie() bool {
//
// For instance, if `paths` contains the following 3 paths, and bitIndex is `1`:
// [[0,0,1,1], [0,1,0,1], [0,0,0,1]]
// then `splitByPath` returns 1 and updates `paths` into:
// then `splitByPath` returns 2 and updates `paths` into:
// [[0,0,1,1], [0,0,0,1], [0,1,0,1]]
func splitByPath(paths []ledger.Path, payloads []ledger.Payload, bitIndex int) int {
i := 0
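The function body is truncated in this view. As a sketch, the in-place partition the corrected comment describes could look like the following, with `bit` as an assumed helper reading a path's bit at a given index (0 = most significant), relying on Path's underlying byte-array form; this is an illustration, not the repository's implementation. Tracing the comment's example through it reproduces the documented return value of 2:

```go
package sketch

import "github.com/onflow/flow-go/ledger"

// bit returns the bit of p at index idx, counting from the most
// significant bit (assumed convention).
func bit(p ledger.Path, idx int) int {
	return int(p[idx/8] >> (7 - idx%8) & 1)
}

// splitByPath moves every path whose bit at bitIndex is 0 (and its
// payload) to the front, and returns the count of such paths.
func splitByPath(paths []ledger.Path, payloads []ledger.Payload, bitIndex int) int {
	i := 0
	for j := range paths {
		if bit(paths[j], bitIndex) == 0 {
			paths[i], paths[j] = paths[j], paths[i]
			payloads[i], payloads[j] = payloads[j], payloads[i]
			i++
		}
	}
	return i
}
```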
2 changes: 1 addition & 1 deletion ledger/complete/mtrie/trie/trie_test.go
@@ -234,7 +234,7 @@ func sampleRandomRegisterWrites(rng *LinearCongruentialGenerator, number int) ([
return paths, payloads
}

// sampleRandomRegisterWrites generates path-payload tuples for `number` randomly selected registers;
// sampleRandomRegisterWritesWithPrefix generates path-payload tuples for `number` randomly selected registers;
// each path is starting with the specified `prefix` and is filled to the full length with random bytes
// caution: register paths might repeat
func sampleRandomRegisterWritesWithPrefix(rng *LinearCongruentialGenerator, number int, prefix []byte) ([]ledger.Path, []ledger.Payload) {
2 changes: 1 addition & 1 deletion ledger/complete/wal/checkpointer_test.go
@@ -168,7 +168,7 @@ func Test_Checkpointing(t *testing.T) {
require.FileExists(t, path.Join(dir, "00000010")) //make sure we have enough segments saved
})

// create a new forest and reply WAL
// create a new forest and replay WAL
f2, err := mtrie.NewForest(size*10, metricsCollector, func(tree *trie.MTrie) error { return nil })
require.NoError(t, err)

5 changes: 3 additions & 2 deletions ledger/complete/wal/encoding.go
@@ -18,12 +18,13 @@ The LedgerWAL update record uses two operations so far - an update, which must include all keys and values, and a deletion,
which only needs a root tree state commitment.
Updates need to be atomic, hence we prepare a binary representation of the whole changeset.
Since keys, values and state commitment data types are variable length, we have to store their lengths as well.
Every record has:
If OP = WALDelete, record has:
1 byte Operation Type | 2 bytes Big Endian uint16 length of state commitment | state commitment data
If OP = WALUpdate, then it follow with:
If OP = WALUpdate, record has:
1 byte Operation Type | 2 bytes version | 1 byte TypeTrieUpdate | 2 bytes Big Endian uint16 length of state commitment | state commitment data |
4 bytes Big Endian uint32 - total number of key/value pairs | 2 bytes Big Endian uint16 - length of key (keys are the same length)
and for every pair after
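As a sketch of the WALDelete layout spelled out above (the opcode's numeric value and the helper's name are assumptions, not the package's API):

```go
package sketch

import "encoding/binary"

// encodeDelete lays out a WALDelete record: 1-byte operation type,
// 2-byte big-endian length of the state commitment, then the commitment.
func encodeDelete(opDelete byte, stateCommitment []byte) []byte {
	buf := make([]byte, 0, 1+2+len(stateCommitment))
	buf = append(buf, opDelete)
	buf = binary.BigEndian.AppendUint16(buf, uint16(len(stateCommitment)))
	return append(buf, stateCommitment...)
}
```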