diff --git a/cannon/mipsevm/memory/binary_tree.go b/cannon/mipsevm/memory/binary_tree.go new file mode 100644 index 0000000000000..5888ccc41468c --- /dev/null +++ b/cannon/mipsevm/memory/binary_tree.go @@ -0,0 +1,118 @@ +package memory + +import ( + "math/bits" +) + +// BinaryTreeIndex is a representation of the state of the memory in a binary merkle tree. +type BinaryTreeIndex struct { + // generalized index -> merkle root or nil if invalidated + nodes map[uint64]*[32]byte + // Reference to the page table from Memory. + pageTable map[Word]*CachedPage +} + +func NewBinaryTreeMemory() *Memory { + pages := make(map[Word]*CachedPage) + index := NewBinaryTreeIndex(pages) + return &Memory{ + merkleIndex: index, + pageTable: pages, + lastPageKeys: [2]Word{^Word(0), ^Word(0)}, // default to invalid keys, to not match any pages + } +} + +func NewBinaryTreeIndex(pages map[Word]*CachedPage) *BinaryTreeIndex { + return &BinaryTreeIndex{ + nodes: make(map[uint64]*[32]byte), + pageTable: pages, + } +} + +func (m *BinaryTreeIndex) New(pages map[Word]*CachedPage) PageIndex { + x := NewBinaryTreeIndex(pages) + return x +} + +func (m *BinaryTreeIndex) Invalidate(addr Word) { + // find the gindex of the first page covering the address: i.e. ((1 << WordSize) | addr) >> PageAddrSize + // Avoid 64-bit overflow by distributing the right shift across the OR. + gindex := (uint64(1) << (WordSize - PageAddrSize)) | uint64(addr>>PageAddrSize) + + for gindex > 0 { + m.nodes[gindex] = nil + gindex >>= 1 + } +} + +func (m *BinaryTreeIndex) MerkleizeSubtree(gindex uint64) [32]byte { + l := uint64(bits.Len64(gindex)) + if l > MemProofLeafCount { + panic("gindex too deep") + } + if l > PageKeySize { + depthIntoPage := l - 1 - PageKeySize + pageIndex := (gindex >> depthIntoPage) & PageKeyMask + if p, ok := m.pageTable[Word(pageIndex)]; ok { + pageGindex := (1 << depthIntoPage) | (gindex & ((1 << depthIntoPage) - 1)) + return p.MerkleizeSubtree(pageGindex) + } else { + return zeroHashes[MemProofLeafCount-l] // page does not exist + } + } + n, ok := m.nodes[gindex] + if !ok { + // if the node doesn't exist, the whole sub-tree is zeroed + return zeroHashes[MemProofLeafCount-l] + } + if n != nil { + return *n + } + left := m.MerkleizeSubtree(gindex << 1) + right := m.MerkleizeSubtree((gindex << 1) | 1) + r := HashPair(left, right) + m.nodes[gindex] = &r + return r +} + +func (m *BinaryTreeIndex) MerkleProof(addr Word) (out [MemProofSize]byte) { + proof := m.traverseBranch(1, addr, 0) + // encode the proof + for i := 0; i < MemProofLeafCount; i++ { + copy(out[i*32:(i+1)*32], proof[i][:]) + } + return out +} + +func (m *BinaryTreeIndex) traverseBranch(parent uint64, addr Word, depth uint8) (proof [][32]byte) { + if depth == WordSize-5 { + proof = make([][32]byte, 0, WordSize-5+1) + proof = append(proof, m.MerkleizeSubtree(parent)) + return + } + if depth > WordSize-5 { + panic("traversed too deep") + } + self := parent << 1 + sibling := self | 1 + if addr&(1<<((WordSize-1)-depth)) != 0 { + self, sibling = sibling, self + } + proof = m.traverseBranch(self, addr, depth+1) + siblingNode := m.MerkleizeSubtree(sibling) + proof = append(proof, siblingNode) + return +} + +func (m *BinaryTreeIndex) MerkleRoot() [32]byte { + return m.MerkleizeSubtree(1) +} + +func (m *BinaryTreeIndex) AddPage(pageIndex Word) { + // make nodes to root + k := (1 << PageKeySize) | uint64(pageIndex) + for k > 0 { + m.nodes[k] = nil + k >>= 1 + } +} diff --git a/cannon/mipsevm/memory/memory.go b/cannon/mipsevm/memory/memory.go index 
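For context on how this new index is driven, a minimal sketch follows (assumed to live in the memory package since it touches CachedPage directly; in practice Memory makes these calls itself via AllocPage, SetWord and MerkleRoot rather than the index being used standalone):

	pages := make(map[Word]*CachedPage)
	idx := NewBinaryTreeIndex(pages)
	root0 := idx.MerkleRoot() // no pages yet: the zero hash for the full tree depth

	// Registering an all-zero page rebuilds the branch to the root but leaves the root unchanged.
	p := &CachedPage{Data: new(Page)}
	pages[0x10] = p
	idx.AddPage(0x10)
	root1 := idx.MerkleRoot() // equal to root0

	// After mutating page data, both the page cache and the branch above it must be invalidated,
	// otherwise the memoized nodes map would keep returning the stale root.
	p.Data[0] = 1
	p.InvalidateFull()
	idx.Invalidate(Word(0x10) << PageAddrSize)
	root2 := idx.MerkleRoot() // differs from root1
	_, _, _ = root0, root1, root2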
375aac4992051..aef95b901f859 100644 --- a/cannon/mipsevm/memory/memory.go +++ b/cannon/mipsevm/memory/memory.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "math/bits" "slices" "sort" @@ -45,14 +44,11 @@ var zeroHashes = func() [256][32]byte { }() type Memory struct { - // generalized index -> merkle root or nil if invalidated - nodes map[uint64]*[32]byte - - // pageIndex -> cached page - pages map[Word]*CachedPage - - // Note: since we don't de-alloc pages, we don't do ref-counting. - // Once a page exists, it doesn't leave memory + merkleIndex PageIndex + // Note: since we don't de-alloc Pages, we don't do ref-counting. + // Once a page exists, it doesn't leave memory. + // This map will usually be shared with the PageIndex as well. + pageTable map[Word]*CachedPage // two caches: we often read instructions from one page, and do memory things with another page. // this prevents map lookups each instruction @@ -60,20 +56,34 @@ type Memory struct { lastPage [2]*CachedPage } +type PageIndex interface { + MerkleRoot() [32]byte + AddPage(pageIndex Word) + MerkleProof(addr Word) [MemProofSize]byte + MerkleizeSubtree(gindex uint64) [32]byte + Invalidate(addr Word) + + New(pages map[Word]*CachedPage) PageIndex +} + func NewMemory() *Memory { - return &Memory{ - nodes: make(map[uint64]*[32]byte), - pages: make(map[Word]*CachedPage), - lastPageKeys: [2]Word{^Word(0), ^Word(0)}, // default to invalid keys, to not match any pages - } + return NewBinaryTreeMemory() +} + +func (m *Memory) MerkleRoot() [32]byte { + return m.MerkleizeSubtree(1) +} + +func (m *Memory) MerkleProof(addr Word) [MemProofSize]byte { + return m.merkleIndex.MerkleProof(addr) } func (m *Memory) PageCount() int { - return len(m.pages) + return len(m.pageTable) } func (m *Memory) ForEachPage(fn func(pageIndex Word, page *Page) error) error { - for pageIndex, cachedPage := range m.pages { + for pageIndex, cachedPage := range m.pageTable { if err := fn(pageIndex, cachedPage.Data); err != nil { return err } @@ -82,69 +92,10 @@ func (m *Memory) ForEachPage(fn func(pageIndex Word, page *Page) error) error { } func (m *Memory) MerkleizeSubtree(gindex uint64) [32]byte { - l := uint64(bits.Len64(gindex)) - if l > MemProofLeafCount { - panic("gindex too deep") - } - if l > PageKeySize { - depthIntoPage := l - 1 - PageKeySize - pageIndex := (gindex >> depthIntoPage) & PageKeyMask - if p, ok := m.pages[Word(pageIndex)]; ok { - pageGindex := (1 << depthIntoPage) | (gindex & ((1 << depthIntoPage) - 1)) - return p.MerkleizeSubtree(pageGindex) - } else { - return zeroHashes[MemProofLeafCount-l] // page does not exist - } - } - n, ok := m.nodes[gindex] - if !ok { - // if the node doesn't exist, the whole sub-tree is zeroed - return zeroHashes[MemProofLeafCount-l] - } - if n != nil { - return *n - } - left := m.MerkleizeSubtree(gindex << 1) - right := m.MerkleizeSubtree((gindex << 1) | 1) - r := HashPair(left, right) - m.nodes[gindex] = &r - return r -} - -func (m *Memory) MerkleProof(addr Word) (out [MemProofSize]byte) { - proof := m.traverseBranch(1, addr, 0) - // encode the proof - for i := 0; i < MemProofLeafCount; i++ { - copy(out[i*32:(i+1)*32], proof[i][:]) - } - return out + return m.merkleIndex.MerkleizeSubtree(gindex) } -func (m *Memory) traverseBranch(parent uint64, addr Word, depth uint8) (proof [][32]byte) { - if depth == WordSize-5 { - proof = make([][32]byte, 0, WordSize-5+1) - proof = append(proof, m.MerkleizeSubtree(parent)) - return - } - if depth > WordSize-5 { - panic("traversed too deep") - } - self := parent << 1 - 
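// Illustrative sketch (not part of this change): with the merkle logic behind the PageIndex
// interface above, Memory only needs an index implementation plus the shared page table, so
// an alternative index could be wired in the same way NewBinaryTreeMemory does. The helper
// below (newMemoryWithIndex) is hypothetical:
//
//	func newMemoryWithIndex(newIndex func(map[Word]*CachedPage) PageIndex) *Memory {
//		pages := make(map[Word]*CachedPage)
//		return &Memory{
//			merkleIndex:  newIndex(pages),
//			pageTable:    pages,
//			lastPageKeys: [2]Word{^Word(0), ^Word(0)}, // invalid keys, so no page matches initially
//		}
//	}
//
// The PageIndex.New method plays the same role when Memory.Copy rebuilds an index for the copied pages.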
sibling := self | 1 - if addr&(1<<((WordSize-1)-depth)) != 0 { - self, sibling = sibling, self - } - proof = m.traverseBranch(self, addr, depth+1) - siblingNode := m.MerkleizeSubtree(sibling) - proof = append(proof, siblingNode) - return -} - -func (m *Memory) MerkleRoot() [32]byte { - return m.MerkleizeSubtree(1) -} - -func (m *Memory) pageLookup(pageIndex Word) (*CachedPage, bool) { +func (m *Memory) PageLookup(pageIndex Word) (*CachedPage, bool) { // hit caches if pageIndex == m.lastPageKeys[0] { return m.lastPage[0], true @@ -152,7 +103,7 @@ func (m *Memory) pageLookup(pageIndex Word) (*CachedPage, bool) { if pageIndex == m.lastPageKeys[1] { return m.lastPage[1], true } - p, ok := m.pages[pageIndex] + p, ok := m.pageTable[pageIndex] // only cache existing pages. if ok { @@ -165,6 +116,30 @@ func (m *Memory) pageLookup(pageIndex Word) (*CachedPage, bool) { return p, ok } +func (m *Memory) SetMemoryRange(addr Word, r io.Reader) error { + for { + pageIndex := addr >> PageAddrSize + pageAddr := addr & PageAddrMask + readLen := PageSize - pageAddr + chunk := make([]byte, readLen) + n, err := r.Read(chunk) + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + p, ok := m.PageLookup(pageIndex) + if !ok { + p = m.AllocPage(pageIndex) + } + p.InvalidateFull() + copy(p.Data[pageAddr:], chunk[:n]) + addr += Word(n) + } +} + // SetWord stores [arch.Word] sized values at the specified address func (m *Memory) SetWord(addr Word, v Word) { // addr must be aligned to WordSizeBytes bytes @@ -174,7 +149,7 @@ func (m *Memory) SetWord(addr Word, v Word) { pageIndex := addr >> PageAddrSize pageAddr := addr & PageAddrMask - p, ok := m.pageLookup(pageIndex) + p, ok := m.PageLookup(pageIndex) if !ok { // allocate the page if we have not already. // Go may mmap relatively large ranges, but we only allocate the pages just in time. @@ -182,17 +157,8 @@ func (m *Memory) SetWord(addr Word, v Word) { } else { prevValid := p.Ok[1] p.invalidate(pageAddr) - if prevValid { // if the page was already invalid before, then nodes to mem-root will also still be. - - // find the gindex of the first page covering the address: i.e. ((1 << WordSize) | addr) >> PageAddrSize - // Avoid 64-bit overflow by distributing the right shift across the OR. 
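// Worked example of this gindex computation (the same expression now lives in
// BinaryTreeIndex.Invalidate), assuming WordSize = 32 and PageAddrSize = 12, i.e. 4 KiB pages:
// for addr = 0x10000 the page index is addr >> 12 = 0x10, so
// gindex = (1 << 20) | 0x10 = 0x100010. Halving repeatedly
// (0x100010 -> 0x80008 -> 0x40004 -> ... -> 1) visits every ancestor up to the root,
// which is exactly the path that gets cleared when a word in that page changes.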
- gindex := (uint64(1) << (WordSize - PageAddrSize)) | uint64(addr>>PageAddrSize) - - for gindex > 0 { - m.nodes[gindex] = nil - gindex >>= 1 - } - + if prevValid { + m.merkleIndex.Invalidate(addr) // invalidate this branch of memory, now that the value changed } } arch.ByteOrderWord.PutWord(p.Data[pageAddr:pageAddr+arch.WordSizeBytes], v) @@ -205,7 +171,8 @@ func (m *Memory) GetWord(addr Word) Word { if addr&arch.ExtMask != 0 { panic(fmt.Errorf("unaligned memory access: %x", addr)) } - p, ok := m.pageLookup(addr >> PageAddrSize) + pageIndex := addr >> PageAddrSize + p, ok := m.PageLookup(pageIndex) if !ok { return 0 } @@ -215,75 +182,82 @@ func (m *Memory) GetWord(addr Word) Word { func (m *Memory) AllocPage(pageIndex Word) *CachedPage { p := &CachedPage{Data: new(Page)} - m.pages[pageIndex] = p - // make nodes to root - k := (1 << PageKeySize) | uint64(pageIndex) - for k > 0 { - m.nodes[k] = nil - k >>= 1 - } + m.pageTable[pageIndex] = p + m.merkleIndex.AddPage(pageIndex) return p } -type pageEntry struct { - Index Word `json:"index"` - Data *Page `json:"data"` +type memReader struct { + m *Memory + addr Word + count Word } -func (m *Memory) MarshalJSON() ([]byte, error) { // nosemgrep - pages := make([]pageEntry, 0, len(m.pages)) - for k, p := range m.pages { - pages = append(pages, pageEntry{ - Index: k, - Data: p.Data, - }) +func (m *Memory) ReadMemoryRange(addr Word, count Word) io.Reader { + return &memReader{m: m, addr: addr, count: count} +} +func (r *memReader) Read(dest []byte) (n int, err error) { + if r.count == 0 { + return 0, io.EOF } - sort.Slice(pages, func(i, j int) bool { - return pages[i].Index < pages[j].Index - }) - return json.Marshal(pages) + + // Keep iterating over memory until we have all our data. + // It may wrap around the address range, and may not be aligned + endAddr := r.addr + r.count + + pageIndex := r.addr >> PageAddrSize + start := r.addr & PageAddrMask + end := Word(PageSize) + + if pageIndex == (endAddr >> PageAddrSize) { + end = endAddr & PageAddrMask + } + p, ok := r.m.PageLookup(pageIndex) + if ok { + n = copy(dest, p.Data[start:end]) + } else { + n = copy(dest, make([]byte, end-start)) // default to zeroes + } + r.addr += Word(n) + r.count -= Word(n) + return n, nil } -func (m *Memory) UnmarshalJSON(data []byte) error { - var pages []pageEntry - if err := json.Unmarshal(data, &pages); err != nil { - return err +func (m *Memory) UsageRaw() uint64 { + return uint64(len(m.pageTable)) * PageSize +} + +func (m *Memory) Usage() string { + total := m.UsageRaw() + const unit = 1024 + if total < unit { + return fmt.Sprintf("%d B", total) } - m.nodes = make(map[uint64]*[32]byte) - m.pages = make(map[Word]*CachedPage) - m.lastPageKeys = [2]Word{^Word(0), ^Word(0)} - m.lastPage = [2]*CachedPage{nil, nil} - for i, p := range pages { - if _, ok := m.pages[p.Index]; ok { - return fmt.Errorf("cannot load duplicate page, entry %d, page index %d", i, p.Index) - } - m.AllocPage(p.Index).Data = p.Data + div, exp := uint64(unit), 0 + for n := total / unit; n >= unit; n /= unit { + div *= unit + exp++ } - return nil + // KiB, MiB, GiB, TiB, ... 
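// Illustrative example (not from this change): for total = 3 * 1024 * 1024 the loop above
// finishes with div = 1024*1024 and exp = 1, so the Sprintf below yields "3.0 MiB".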
+ return fmt.Sprintf("%.1f %ciB", float64(total)/float64(div), "KMGTPE"[exp]) } -func (m *Memory) SetMemoryRange(addr Word, r io.Reader) error { - for { - pageIndex := addr >> PageAddrSize - pageAddr := addr & PageAddrMask - readLen := PageSize - pageAddr - chunk := make([]byte, readLen) - n, err := r.Read(chunk) - if err != nil { - if err == io.EOF { - return nil - } - return err - } +func (m *Memory) Copy() *Memory { + pages := make(map[Word]*CachedPage) + table := m.merkleIndex.New(pages) + out := &Memory{ + merkleIndex: table, + pageTable: pages, + lastPageKeys: [2]Word{^Word(0), ^Word(0)}, // default to invalid keys, to not match any pages + lastPage: [2]*CachedPage{nil, nil}, + } - p, ok := m.pageLookup(pageIndex) - if !ok { - p = m.AllocPage(pageIndex) - } - p.InvalidateFull() - copy(p.Data[pageAddr:], chunk[:n]) - addr += Word(n) + for k, page := range m.pageTable { + data := new(Page) + *data = *page.Data + out.AllocPage(k).Data = data } + return out } // Serialize writes the memory in a simple binary format which can be read again using Deserialize @@ -299,11 +273,11 @@ func (m *Memory) Serialize(out io.Writer) error { if err := binary.Write(out, binary.BigEndian, Word(m.PageCount())); err != nil { return err } - indexes := maps.Keys(m.pages) + indexes := maps.Keys(m.pageTable) // iterate sorted map keys for consistent serialization slices.Sort(indexes) for _, pageIndex := range indexes { - page := m.pages[pageIndex] + page := m.pageTable[pageIndex] if err := binary.Write(out, binary.BigEndian, pageIndex); err != nil { return err } @@ -332,72 +306,35 @@ func (m *Memory) Deserialize(in io.Reader) error { return nil } -func (m *Memory) Copy() *Memory { - out := NewMemory() - out.nodes = make(map[uint64]*[32]byte) - out.pages = make(map[Word]*CachedPage) - out.lastPageKeys = [2]Word{^Word(0), ^Word(0)} - out.lastPage = [2]*CachedPage{nil, nil} - for k, page := range m.pages { - data := new(Page) - *data = *page.Data - out.AllocPage(k).Data = data - } - return out -} - -type memReader struct { - m *Memory - addr Word - count Word +type pageEntry struct { + Index Word `json:"index"` + Data *Page `json:"data"` } -func (r *memReader) Read(dest []byte) (n int, err error) { - if r.count == 0 { - return 0, io.EOF - } - - // Keep iterating over memory until we have all our data. 
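// Sketch of the Copy semantics added above (illustrative, not part of the diff): the copy
// gets its own page table plus a fresh index from PageIndex.New, and each Page is deep-copied,
// so later writes to either Memory leave the other's merkle root untouched.
//
//	orig := NewMemory()
//	orig.SetWord(0x1000, 42)
//	snap := orig.Copy()
//	orig.SetWord(0x1000, 7)
//	// snap.MerkleRoot() still reflects 42; orig.MerkleRoot() reflects 7.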
- // It may wrap around the address range, and may not be aligned - endAddr := r.addr + r.count - - pageIndex := r.addr >> PageAddrSize - start := r.addr & PageAddrMask - end := Word(PageSize) - - if pageIndex == (endAddr >> PageAddrSize) { - end = endAddr & PageAddrMask - } - p, ok := r.m.pageLookup(pageIndex) - if ok { - n = copy(dest, p.Data[start:end]) - } else { - n = copy(dest, make([]byte, end-start)) // default to zeroes +func (m *Memory) MarshalJSON() ([]byte, error) { // nosemgrep + pages := make([]pageEntry, 0, len(m.pageTable)) + for k, p := range m.pageTable { + pages = append(pages, pageEntry{ + Index: k, + Data: p.Data, + }) } - r.addr += Word(n) - r.count -= Word(n) - return n, nil -} - -func (m *Memory) ReadMemoryRange(addr Word, count Word) io.Reader { - return &memReader{m: m, addr: addr, count: count} -} - -func (m *Memory) UsageRaw() uint64 { - return uint64(len(m.pages)) * PageSize + sort.Slice(pages, func(i, j int) bool { + return pages[i].Index < pages[j].Index + }) + return json.Marshal(pages) } -func (m *Memory) Usage() string { - total := m.UsageRaw() - const unit = 1024 - if total < unit { - return fmt.Sprintf("%d B", total) +func (m *Memory) UnmarshalJSON(data []byte) error { + var pages []pageEntry + if err := json.Unmarshal(data, &pages); err != nil { + return err } - div, exp := uint64(unit), 0 - for n := total / unit; n >= unit; n /= unit { - div *= unit - exp++ + for i, p := range pages { + if _, ok := m.pageTable[p.Index]; ok { + return fmt.Errorf("cannot load duplicate page, entry %d, page index %d", i, p.Index) + } + m.AllocPage(p.Index).Data = p.Data } - // KiB, MiB, GiB, TiB, ... - return fmt.Sprintf("%.1f %ciB", float64(total)/float64(div), "KMGTPE"[exp]) + return nil } diff --git a/cannon/mipsevm/memory/memory_test.go b/cannon/mipsevm/memory/memory32_binary_tree_test.go similarity index 99% rename from cannon/mipsevm/memory/memory_test.go rename to cannon/mipsevm/memory/memory32_binary_tree_test.go index 5100de5b41596..d7597eaace007 100644 --- a/cannon/mipsevm/memory/memory_test.go +++ b/cannon/mipsevm/memory/memory32_binary_tree_test.go @@ -231,7 +231,7 @@ func TestMemoryJSON(t *testing.T) { m.SetWord(8, 0xAABBCCDD) dat, err := json.Marshal(m) require.NoError(t, err) - var res Memory + res := NewMemory() require.NoError(t, json.Unmarshal(dat, &res)) require.Equal(t, uint32(0xAABBCCDD), res.GetWord(8)) } diff --git a/cannon/mipsevm/memory/memory64_benchmark_test.go b/cannon/mipsevm/memory/memory64_benchmark_test.go new file mode 100644 index 0000000000000..8ae56c5b590e4 --- /dev/null +++ b/cannon/mipsevm/memory/memory64_benchmark_test.go @@ -0,0 +1,137 @@ +//go:build cannon64 +// +build cannon64 + +package memory + +import ( + "testing" + + "math/rand" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" +) + +const ( + smallDataset = 12_500_000 + mediumDataset = 100_000_000 + largeDataset = 400_000_000 +) + +func BenchmarkMemoryOperations(b *testing.B) { + benchmarks := []struct { + name string + fn func(b *testing.B, m *Memory) + }{ + {"RandomReadWrite_Small", benchRandomReadWrite(smallDataset)}, + {"RandomReadWrite_Medium", benchRandomReadWrite(mediumDataset)}, + {"RandomReadWrite_Large", benchRandomReadWrite(largeDataset)}, + {"SequentialReadWrite_Small", benchSequentialReadWrite(smallDataset)}, + {"SequentialReadWrite_Large", benchSequentialReadWrite(largeDataset)}, + {"SparseMemoryUsage", benchSparseMemoryUsage}, + {"DenseMemoryUsage", benchDenseMemoryUsage}, + {"SmallFrequentUpdates", benchSmallFrequentUpdates}, + 
{"MerkleProofGeneration_Small", benchMerkleProofGeneration(smallDataset)}, + {"MerkleProofGeneration_Large", benchMerkleProofGeneration(largeDataset)}, + {"MerkleRootCalculation_Small", benchMerkleRootCalculation(smallDataset)}, + {"MerkleRootCalculation_Large", benchMerkleRootCalculation(largeDataset)}, + } + + for _, bm := range benchmarks { + b.Run("BinaryTree", func(b *testing.B) { + b.Run(bm.name, func(b *testing.B) { + m := NewBinaryTreeMemory() + b.ResetTimer() + bm.fn(b, m) + }) + }) + } +} + +func benchRandomReadWrite(size int) func(b *testing.B, m *Memory) { + return func(b *testing.B, m *Memory) { + addresses := make([]uint64, size) + for i := range addresses { + addresses[i] = rand.Uint64() & arch.AddressMask + } + data := Word(0x1234567890ABCDEF) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := addresses[i%len(addresses)] + if i%2 == 0 { + m.SetWord(addr, data) + } else { + data = m.GetWord(addr) + } + } + } +} + +func benchSequentialReadWrite(size int) func(b *testing.B, m *Memory) { + return func(b *testing.B, m *Memory) { + data := Word(0x1234567890ABCDEF) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := Word((i % size) * 8) + if i%2 == 0 { + m.SetWord(addr, data) + } else { + data = m.GetWord(addr) + } + } + } +} + +func benchSparseMemoryUsage(b *testing.B, m *Memory) { + data := Word(0x1234567890ABCDEF) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := (uint64(i) * 10_000_000) & arch.AddressMask // Large gaps between addresses + m.SetWord(addr, data) + } +} + +func benchDenseMemoryUsage(b *testing.B, m *Memory) { + data := Word(0x1234567890ABCDEF) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(i) * 8 // Contiguous 8-byte allocations + m.SetWord(addr, data) + } +} + +func benchSmallFrequentUpdates(b *testing.B, m *Memory) { + data := Word(0x1234567890ABCDEF) + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := Word(rand.Intn(1000000)) & arch.AddressMask // Confined to a smaller range + m.SetWord(addr, data) + } +} + +func benchMerkleProofGeneration(size int) func(b *testing.B, m *Memory) { + return func(b *testing.B, m *Memory) { + // Setup: allocate some memory + for i := 0; i < size; i++ { + m.SetWord(uint64(i)*8, Word(i)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + addr := uint64(rand.Intn(size) * 8) + _ = m.MerkleProof(addr) + } + } +} + +func benchMerkleRootCalculation(size int) func(b *testing.B, m *Memory) { + return func(b *testing.B, m *Memory) { + // Setup: allocate some memory + for i := 0; i < size; i++ { + m.SetWord(uint64(i)*8, Word(i)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = m.MerkleRoot() + } + } +} diff --git a/cannon/mipsevm/memory/memory64_test.go b/cannon/mipsevm/memory/memory64_binary_tree_test.go similarity index 89% rename from cannon/mipsevm/memory/memory64_test.go rename to cannon/mipsevm/memory/memory64_binary_tree_test.go index 203ff5f80d2f4..1fce4123765a8 100644 --- a/cannon/mipsevm/memory/memory64_test.go +++ b/cannon/mipsevm/memory/memory64_binary_tree_test.go @@ -18,9 +18,9 @@ import ( // These tests are mostly copied from memory_test.go. With a few tweaks for 64-bit. 
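// Note on the benchmarks above (informational, not part of the diff): the cannon64 build tag
// means they only compile for the 64-bit target, so they are run with something like
//
//	go test -tags cannon64 -run '^$' -bench BenchmarkMemoryOperations ./cannon/mipsevm/memory
//
// Each benchmark builds a fresh Memory via NewBinaryTreeMemory and calls b.ResetTimer after
// its setup, so populating the dataset is excluded from the measured proof/root timings.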
-func TestMemory64MerkleProof(t *testing.T) { +func TestMemory64BinaryTreeMerkleProof(t *testing.T) { t.Run("nearly empty tree", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(0x10000, 0xAABBCCDD_EEFF1122) proof := m.MerkleProof(0x10000) require.Equal(t, uint64(0xAABBCCDD_EEFF1122), binary.BigEndian.Uint64(proof[:8])) @@ -29,7 +29,7 @@ func TestMemory64MerkleProof(t *testing.T) { } }) t.Run("fuller tree", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(0x10000, 0xaabbccdd) m.SetWord(0x80008, 42) m.SetWord(0x13370000, 123) @@ -51,40 +51,40 @@ func TestMemory64MerkleProof(t *testing.T) { }) } -func TestMemory64MerkleRoot(t *testing.T) { +func TestMemory64BinaryTreeMerkleRoot(t *testing.T) { t.Run("empty", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "fully zeroed memory should have expected zero hash") }) t.Run("empty page", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(0xF000, 0) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "fully zeroed memory should have expected zero hash") }) t.Run("single page", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(0xF000, 1) root := m.MerkleRoot() require.NotEqual(t, zeroHashes[64-5], root, "non-zero memory") }) t.Run("repeat zero", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(0xF000, 0) m.SetWord(0xF008, 0) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "zero still") }) t.Run("two empty pages", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(PageSize*3, 0) m.SetWord(PageSize*10, 0) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "zero still") }) t.Run("random few pages", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(PageSize*3, 1) m.SetWord(PageSize*5, 42) m.SetWord(PageSize*6, 123) @@ -106,7 +106,7 @@ func TestMemory64MerkleRoot(t *testing.T) { require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func") }) t.Run("invalidate page", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(0xF000, 0) require.Equal(t, zeroHashes[64-5], m.MerkleRoot(), "zero at first") m.SetWord(0xF008, 1) @@ -116,9 +116,9 @@ func TestMemory64MerkleRoot(t *testing.T) { }) } -func TestMemory64ReadWrite(t *testing.T) { +func TestMemory64BinaryTreeReadWrite(t *testing.T) { t.Run("large random", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() data := make([]byte, 20_000) _, err := rand.Read(data[:]) require.NoError(t, err) @@ -131,7 +131,7 @@ func TestMemory64ReadWrite(t *testing.T) { }) t.Run("repeat range", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() data := []byte(strings.Repeat("under the big bright yellow sun ", 40)) require.NoError(t, m.SetMemoryRange(0x1337, bytes.NewReader(data))) res, err := io.ReadAll(m.ReadMemoryRange(0x1337-10, Word(len(data)+20))) @@ -142,7 +142,7 @@ func TestMemory64ReadWrite(t *testing.T) { }) t.Run("empty range", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() addr := Word(0xAABBCC00) r := bytes.NewReader(nil) pre := m.MerkleRoot() @@ -168,7 +168,7 @@ func TestMemory64ReadWrite(t *testing.T) { }) t.Run("range page overlap", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() data := bytes.Repeat([]byte{0xAA}, PageAddrSize) require.NoError(t, 
m.SetMemoryRange(0, bytes.NewReader(data))) for i := 0; i < PageAddrSize/arch.WordSizeBytes; i++ { @@ -186,7 +186,7 @@ func TestMemory64ReadWrite(t *testing.T) { }) t.Run("read-write", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(16, 0xAABBCCDD_EEFF1122) require.Equal(t, Word(0xAABBCCDD_EEFF1122), m.GetWord(16)) m.SetWord(16, 0xAABB1CDD_EEFF1122) @@ -196,7 +196,7 @@ func TestMemory64ReadWrite(t *testing.T) { }) t.Run("unaligned read", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(16, Word(0xAABBCCDD_EEFF1122)) m.SetWord(24, 0x11223344_55667788) for i := Word(17); i < 24; i++ { @@ -210,7 +210,7 @@ func TestMemory64ReadWrite(t *testing.T) { }) t.Run("unaligned write", func(t *testing.T) { - m := NewMemory() + m := NewBinaryTreeMemory() m.SetWord(16, 0xAABBCCDD_EEFF1122) require.Panics(t, func() { m.SetWord(17, 0x11223344) @@ -237,18 +237,18 @@ func TestMemory64ReadWrite(t *testing.T) { }) } -func TestMemory64JSON(t *testing.T) { - m := NewMemory() +func TestMemory64BinaryTreeJSON(t *testing.T) { + m := NewBinaryTreeMemory() m.SetWord(8, 0xAABBCCDD_EEFF1122) dat, err := json.Marshal(m) require.NoError(t, err) - var res Memory + res := NewBinaryTreeMemory() require.NoError(t, json.Unmarshal(dat, &res)) require.Equal(t, Word(0xAABBCCDD_EEFF1122), res.GetWord(8)) } -func TestMemory64Copy(t *testing.T) { - m := NewMemory() +func TestMemory64BinaryTreeCopy(t *testing.T) { + m := NewBinaryTreeMemory() m.SetWord(0xAABBCCDD_8000, 0x000000_AABB) mcpy := m.Copy() require.Equal(t, Word(0xAABB), mcpy.GetWord(0xAABBCCDD_8000)) diff --git a/cannon/mipsevm/multithreaded/state_test.go b/cannon/mipsevm/multithreaded/state_test.go index 1a457b8355654..2badb9ee1f6f7 100644 --- a/cannon/mipsevm/multithreaded/state_test.go +++ b/cannon/mipsevm/multithreaded/state_test.go @@ -118,7 +118,7 @@ func TestState_JSONCodec(t *testing.T) { stateJSON, err := json.Marshal(state) require.NoError(t, err) - var newState *State + newState := CreateEmptyState() err = json.Unmarshal(stateJSON, &newState) require.NoError(t, err) diff --git a/cannon/mipsevm/singlethreaded/state.go b/cannon/mipsevm/singlethreaded/state.go index c02528ec35200..71e664cbbc163 100644 --- a/cannon/mipsevm/singlethreaded/state.go +++ b/cannon/mipsevm/singlethreaded/state.go @@ -111,6 +111,7 @@ func (s *State) MarshalJSON() ([]byte, error) { // nosemgrep func (s *State) UnmarshalJSON(data []byte) error { sm := new(stateMarshaling) + sm.Memory = memory.NewMemory() if err := json.Unmarshal(data, sm); err != nil { return err } diff --git a/cannon/mipsevm/singlethreaded/state_test.go b/cannon/mipsevm/singlethreaded/state_test.go index bfab4dd279885..be2ce50357db4 100644 --- a/cannon/mipsevm/singlethreaded/state_test.go +++ b/cannon/mipsevm/singlethreaded/state_test.go @@ -73,7 +73,7 @@ func TestStateJSONCodec(t *testing.T) { stateJSON, err := state.MarshalJSON() require.NoError(t, err) - newState := new(State) + newState := CreateEmptyState() require.NoError(t, newState.UnmarshalJSON(stateJSON)) require.Equal(t, state.PreimageKey, newState.PreimageKey) diff --git a/op-challenger/game/fault/trace/cannon/provider_test.go b/op-challenger/game/fault/trace/cannon/provider_test.go index 01cd513cb85ff..d7ec9fd8da761 100644 --- a/op-challenger/game/fault/trace/cannon/provider_test.go +++ b/op-challenger/game/fault/trace/cannon/provider_test.go @@ -52,7 +52,7 @@ func TestGet(t *testing.T) { t.Run("ProofAfterEndOfTrace", func(t *testing.T) { provider, generator := setupWithTestData(t, dataDir, prestate) 
generator.finalState = &singlethreaded.State{ - Memory: &memory.Memory{}, + Memory: memory.NewMemory(), Step: 10, Exited: true, } @@ -108,7 +108,7 @@ func TestGetStepData(t *testing.T) { dataDir, prestate := setupTestData(t) provider, generator := setupWithTestData(t, dataDir, prestate) generator.finalState = &singlethreaded.State{ - Memory: &memory.Memory{}, + Memory: memory.NewMemory(), Step: 10, Exited: true, } @@ -134,7 +134,7 @@ func TestGetStepData(t *testing.T) { dataDir, prestate := setupTestData(t) provider, generator := setupWithTestData(t, dataDir, prestate) generator.finalState = &singlethreaded.State{ - Memory: &memory.Memory{}, + Memory: memory.NewMemory(), Step: 10, Exited: true, } @@ -160,7 +160,7 @@ func TestGetStepData(t *testing.T) { dataDir, prestate := setupTestData(t) provider, initGenerator := setupWithTestData(t, dataDir, prestate) initGenerator.finalState = &singlethreaded.State{ - Memory: &memory.Memory{}, + Memory: memory.NewMemory(), Step: 10, Exited: true, } @@ -178,7 +178,7 @@ func TestGetStepData(t *testing.T) { provider, generator := setupWithTestData(t, dataDir, prestate) generator.finalState = &singlethreaded.State{ - Memory: &memory.Memory{}, + Memory: memory.NewMemory(), Step: 10, Exited: true, }
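A closing note on the provider and state test updates: with the merkle index extracted behind PageIndex, a zero-value memory.Memory no longer has a usable page table or index, so call sites must go through the constructor (hence memory.NewMemory() and CreateEmptyState() above). A minimal usage sketch under that assumption:

	m := memory.NewMemory()   // wires up the page table and the binary-tree index
	m.SetWord(8, 0xAABBCCDD)
	root := m.MerkleRoot()    // delegated to the PageIndex implementation
	proof := m.MerkleProof(8) // 32-byte leaf chunk followed by sibling hashes up to the root
	_, _ = root, proof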