Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
60 commits
Select commit Hold shift + click to select a range
95c36ef
Abstract the Merkle representation
ec2 Feb 10, 2025
86ec0d5
Implement asterisc's MPT
ec2 Feb 10, 2025
9058d7e
migrate tests for mpt
ec2 Feb 10, 2025
161dc8d
migrate tests for mpt
ec2 Feb 10, 2025
c97d345
copied benchmarks from asterisc
ec2 Feb 10, 2025
cf19a8a
fix failed merge
ec2 Feb 10, 2025
78d5295
Merge branch 'develop' into ec2/mem-abstraction
ec2 Feb 10, 2025
68af153
Merge branch 'develop' into ec2/mem-abstraction
ec2 Feb 13, 2025
7618e79
Avoid pagelookup twice during setword invalidation
ec2 Feb 13, 2025
bf5a7c5
use uints
ec2 Feb 13, 2025
a0dd029
hashpool
ec2 Feb 13, 2025
08078ac
alloc pages upfront
ec2 Feb 18, 2025
9598042
remove clutter
ec2 Feb 18, 2025
34175c2
fix
ec2 Feb 18, 2025
b595708
gomallocregion
ec2 Feb 18, 2025
b7b7898
more readable mapped regions
ec2 Feb 20, 2025
8d5f0ed
Merge branch 'develop' into ec2/mem-abstraction
ec2 Feb 24, 2025
3260de5
fix state json codec test
ec2 Feb 24, 2025
28b1b61
fix for singlethread too
ec2 Feb 24, 2025
4c4b15c
fix op-challenger test
ec2 Feb 24, 2025
8e69dec
Merge branch 'develop' into ec2/mem-abstraction
ec2 Mar 3, 2025
f9cece4
Remove MPT implementation
ec2 Mar 3, 2025
c4e518f
address comments
ec2 Mar 3, 2025
8cc8fe0
fix benchmark
ec2 Mar 3, 2025
070f132
Use uint64 and also reuse hasher and buffers
ec2 Mar 3, 2025
795e741
Merge branch 'ec2/bitlist' into ec2/alloc-preheap
ec2 Mar 3, 2025
892f91a
clean
ec2 Mar 3, 2025
ef422bb
clean up some
ec2 Mar 3, 2025
9ec5408
fix range
ec2 Mar 3, 2025
122a406
Merge branch 'develop' into ec2/alloc-preheap
ec2 May 28, 2025
89983c0
Merge branch 'develop' into ec2/alloc-preheap
ec2 May 29, 2025
8b24058
Merge branch 'develop' into ec2/alloc-preheap
ec2 May 29, 2025
1c8b967
Merge branch 'ec2/alloc-preheap' of https://github.com/ChainSafe/opti…
salindne Jun 17, 2025
2a19b22
Address reviewer feedback: clarify region bounds and add instruction …
salindne Jul 9, 2025
2d6d659
Revert "Address reviewer feedback: clarify region bounds and add inst…
salindne Jul 9, 2025
22a3fd7
Clarify region bounds and add fallback to memory decode if PC exceeds…
salindne Jul 10, 2025
2965ca1
Update cannon/mipsevm/multithreaded/mips.go
salindne Jul 14, 2025
05b870f
fixed size of preheap cache for binaryTreeMemory
salindne Jul 15, 2025
d311e21
slice out of bounds fixed in GetWord and AllocPage, pagetest syntax fix
salindne Jul 15, 2025
e243074
fix program heap initialization for binary tree
salindne Jul 16, 2025
0b843b5
Merge remote-tracking branch 'origin/develop' into ec2/alloc-preheap
salindne Jul 22, 2025
ea7f805
moved cache decoded from instrumented state declaration to doMipsStep
salindne Jul 26, 2025
3773910
added default and set region map size to newMemory
salindne Jul 26, 2025
7d86885
added 4096 byte memory region option to all testing cases using VMFac…
salindne Jul 26, 2025
a0e4f05
clean up
salindne Jul 26, 2025
47a7166
cache_decode to be initialize on first mipstep only for single step …
salindne Jul 28, 2025
f27ab39
added heap region sizing for unit/fuzz tests
salindne Jul 28, 2025
851c834
Revert " cache_decode to be initialize on first mipstep only for sing…
salindne Jul 29, 2025
43f5bf1
cache allocation in instrumentedState initialization, catch for stale…
salindne Jul 29, 2025
49f4d96
remove unit test logic from domipstep and added helper for cache upda…
salindne Jul 31, 2025
a9536d0
merge with current develop
salindne Aug 1, 2025
86e6177
update multithreaded test
salindne Aug 1, 2025
4aaf304
merge fuzz test migrations
salindne Aug 11, 2025
98ae85b
merge migration changes from develop
salindne Aug 11, 2025
25b4751
some test step boundaries in instrumented_test needed to be slightly …
salindne Aug 12, 2025
11b2176
small changes to step count and mismatch error vs string fault
salindne Aug 13, 2025
d4f430a
cannon: Use tiny i-cache by default
Inphi Aug 15, 2025
5ce1a09
op-e2e: Limit max parallelism for fp tests
Inphi Aug 15, 2025
7707d32
increase parallelism
Inphi Aug 16, 2025
ce9dc7e
Merge pull request #10 from ethereum-optimism/inphi/ec2/alloc-preheap
salindne Aug 18, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2229,7 +2229,7 @@ workflows:
notify: true
mentions: "@proofs-team"
no_output_timeout: 90m
test_timeout: 240m
test_timeout: 480m
resource_class: ethereum-optimism/latitude-fps-1
context:
- slack
Expand Down
2 changes: 1 addition & 1 deletion cannon/cmd/run.go
Original file line number Diff line number Diff line change
Expand Up @@ -395,7 +395,7 @@ func Run(ctx *cli.Context) error {
}
}

state, err := versions.LoadStateFromFile(ctx.Path(RunInputFlag.Name))
state, err := versions.LoadStateFromFileWithLargeICache(ctx.Path(RunInputFlag.Name))
if err != nil {
return fmt.Errorf("failed to load state: %w", err)
}
Expand Down
11 changes: 6 additions & 5 deletions cannon/mipsevm/arch/arch64.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,12 @@ const (
ExtMask = 0x7

// Ensure virtual address is limited to 48-bits as many user programs assume such to implement packed pointers
// limit 0x00_00_FF_FF_FF_FF_FF_FF
HeapStart = 0x00_00_10_00_00_00_00_00
HeapEnd = 0x00_00_60_00_00_00_00_00
ProgramBreak = 0x00_00_40_00_00_00_00_00
HighMemoryStart = 0x00_00_7F_FF_FF_FF_F0_00
Limit = 0x00_00_FF_FF_FF_FF_FF_FF
ProgramHeapStart = 0x00_00_00_c0_00_00_00_00
HeapStart = 0x00_00_10_00_00_00_00_00
HeapEnd = 0x00_00_60_00_00_00_00_00
ProgramBreak = 0x00_00_40_00_00_00_00_00
HighMemoryStart = 0x00_00_7F_FF_FF_FF_F0_00
)

// MIPS64 syscall table - https://github.com/torvalds/linux/blob/3efc57369a0ce8f76bf0804f7e673982384e4ac9/arch/mips/kernel/syscalls/syscall_n64.tbl. Generate the syscall numbers using the Makefile in that directory.
Expand Down
40 changes: 35 additions & 5 deletions cannon/mipsevm/memory/binary_tree.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
package memory

import (
"fmt"
"math/bits"

"github.com/ethereum-optimism/optimism/cannon/mipsevm/arch"
)

// BinaryTreeIndex is a representation of the state of the memory in a binary merkle tree.
Expand All @@ -12,13 +15,40 @@ type BinaryTreeIndex struct {
pageTable map[Word]*CachedPage
}

func NewBinaryTreeMemory() *Memory {
pages := make(map[Word]*CachedPage)
// NewBinaryTreeMemory constructs a Memory backed by a binary merkle tree
// index, with two pre-allocated mapped regions: a code region starting at
// address 0 and a heap region starting at arch.ProgramHeapStart. Passing 0
// for either size selects a 2 GiB default for that region.
func NewBinaryTreeMemory(codeSize, heapSize arch.Word) *Memory {
	const defaultRegionSize = 1 << 31 // 2 GiB

	// Fall back to the defaults when a size was not provided.
	if codeSize == 0 {
		codeSize = defaultRegionSize
	}
	if heapSize == 0 {
		heapSize = defaultRegionSize
	}

	// Defensive bounds: code region must not overlap heap start
	if codeSize > arch.ProgramHeapStart {
		panic(fmt.Sprintf("codeSize (0x%x) overlaps heap start (0x%x)", codeSize, arch.ProgramHeapStart))
	}

	codeRegion := MappedMemoryRegion{
		startAddr: 0,
		endAddr:   codeSize,
		Data:      make([]byte, codeSize),
	}
	heapRegion := MappedMemoryRegion{
		startAddr: arch.ProgramHeapStart,
		endAddr:   arch.ProgramHeapStart + heapSize,
		Data:      make([]byte, heapSize),
	}

	pages := make(map[arch.Word]*CachedPage)
	return &Memory{
		merkleIndex:  NewBinaryTreeIndex(pages),
		pageTable:    pages,
		// Sentinel keys that match no real page, so the first lookups miss.
		lastPageKeys:  [2]arch.Word{^arch.Word(0), ^arch.Word(0)},
		MappedRegions: []MappedMemoryRegion{codeRegion, heapRegion},
	}
}

Expand Down
103 changes: 93 additions & 10 deletions cannon/mipsevm/memory/memory.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,20 @@ const (

type Word = arch.Word

// MappedMemoryRegion is a contiguous, pre-allocated span of memory covering
// the half-open address range [startAddr, endAddr). Data holds the backing
// bytes for the whole span; pages falling inside the region slice into Data
// instead of allocating their own storage (see AllocPage).
type MappedMemoryRegion struct {
	startAddr Word // inclusive first address of the region
	endAddr   Word // exclusive end address of the region
	Data      []byte
}

// AddrInRegion reports whether addr falls inside this region's
// half-open address range [startAddr, endAddr).
func (m *MappedMemoryRegion) AddrInRegion(addr Word) bool {
	if addr < m.startAddr {
		return false
	}
	return addr < m.endAddr
}

// PageIndexInRegion reports whether the given page index lies within the
// range of pages covered by this region.
func (m *MappedMemoryRegion) PageIndexInRegion(pageIndex Word) bool {
	firstPage := m.startAddr >> PageAddrSize
	endPage := m.endAddr >> PageAddrSize
	return pageIndex >= firstPage && pageIndex < endPage
}

type Memory struct {
merkleIndex PageIndex
// Note: since we don't de-alloc Pages, we don't do ref-counting.
Expand All @@ -38,6 +52,8 @@ type Memory struct {
// this prevents map lookups each instruction
lastPageKeys [2]Word
lastPage [2]*CachedPage

MappedRegions []MappedMemoryRegion
}

type PageIndex interface {
Expand All @@ -50,8 +66,53 @@ type PageIndex interface {
New(pages map[Word]*CachedPage) PageIndex
}

// NewMemoryWithLargeRegions builds a binary-tree-backed Memory whose mapped
// code and heap regions are both 2 GiB.
func NewMemoryWithLargeRegions() *Memory {
	const twoGiB = 1 << 31
	return NewBinaryTreeMemory(twoGiB, twoGiB)
}

// NewMemory builds a binary-tree-backed Memory with minimal 4096-byte
// mapped code and heap regions.
func NewMemory() *Memory {
	const smallRegionSize = 4096
	return NewBinaryTreeMemory(smallRegionSize, smallRegionSize)
}

// GetAllocatedRanges returns one [4]Word per contiguous run of allocated
// pages: {start, end, size, gap}. start and end are converted to byte
// addresses before returning, while size and gap stay in page counts —
// NOTE(review): the mixed units look intentional per the original
// "start end size gap" comment, but confirm against callers.
func (m *Memory) GetAllocatedRanges() [][4]Word {
	var ranges [][4]Word
	if len(m.pageTable) == 0 {
		return ranges
	}

	// Extract and sort page addresses
	keys := make([]Word, 0, len(m.pageTable))
	for key := range m.pageTable {
		keys = append(keys, key)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })

	// Find contiguous ranges and gaps
	start := keys[0]
	prev := start
	// Seed lastEnd so the first run reports a gap of 0. When keys[0] is 0
	// this subtraction wraps (Word is unsigned), but the wrap still makes
	// the first gap computation come out to 0, so the result is correct.
	var lastEnd Word = start - 1

	for i := 1; i < len(keys); i++ {
		if keys[i] != prev+1 {
			gap := start - lastEnd - 1 // Gap is calculated from end of prev range to start of new one
			ranges = append(ranges, [4]Word{start, prev, prev - start + 1, gap})
			lastEnd = prev
			start = keys[i]
		}
		prev = keys[i]
	}

	// Append last range
	gap := start - lastEnd - 1
	ranges = append(ranges, [4]Word{start, prev, prev - start + 1, gap})
	// Convert the start/end page indices into byte addresses.
	for i := 0; i < len(ranges); i++ {
		ranges[i][0] <<= PageAddrSize
		ranges[i][1] <<= PageAddrSize
	}
	return ranges
}

func (m *Memory) MerkleRoot() [32]byte {
Expand All @@ -66,15 +127,14 @@ func (m *Memory) PageCount() int {
return len(m.pageTable)
}

func (m *Memory) ForEachPage(fn func(pageIndex Word, page *Page) error) error {
// ForEachPage invokes fn for every allocated page, passing the page index
// and the page's backing data. Iteration stops at the first error, which
// is returned to the caller. Map iteration order is not deterministic.
func (m *Memory) ForEachPage(fn func(pageIndex Word, page Page) error) error {
	for idx, cached := range m.pageTable {
		err := fn(idx, cached.Data)
		if err != nil {
			return err
		}
	}
	return nil
}

// MerkleizeSubtree returns the 32-byte merkle root of the subtree rooted at
// the given generalized index, delegating to the underlying page index.
func (m *Memory) MerkleizeSubtree(gindex uint64) [32]byte {
	return m.merkleIndex.MerkleizeSubtree(gindex)
}
Expand Down Expand Up @@ -155,7 +215,15 @@ func (m *Memory) GetWord(addr Word) Word {
if addr&arch.ExtMask != 0 {
panic(fmt.Errorf("unaligned memory access: %x", addr))
}
for _, region := range m.MappedRegions {
if ok := region.AddrInRegion(addr); ok {
offset := addr - region.startAddr
return arch.ByteOrderWord.Word(region.Data[offset : offset+arch.WordSizeBytes : offset+arch.WordSizeBytes])
}
}

pageIndex := addr >> PageAddrSize

p, ok := m.PageLookup(pageIndex)
if !ok {
return 0
Expand All @@ -165,7 +233,17 @@ func (m *Memory) GetWord(addr Word) Word {
}

func (m *Memory) AllocPage(pageIndex Word) *CachedPage {
p := &CachedPage{Data: new(Page)}
p := new(CachedPage)
for _, region := range m.MappedRegions {
if region.PageIndexInRegion(pageIndex) {
indexAdjusted := pageIndex - region.startAddr>>PageAddrSize
p.Data = region.Data[indexAdjusted*PageSize : (indexAdjusted+1)*PageSize : (indexAdjusted+1)*PageSize]
break
}
}
if p.Data == nil {
p.Data = make(Page, PageSize)
}
m.pageTable[pageIndex] = p
m.merkleIndex.AddPage(pageIndex)
return p
Expand Down Expand Up @@ -237,8 +315,9 @@ func (m *Memory) Copy() *Memory {
}

for k, page := range m.pageTable {
data := new(Page)
*data = *page.Data
data := make(Page, PageSize)
// *data = *page.Data
copy(data, page.Data)
out.AllocPage(k).Data = data
}
return out
Expand Down Expand Up @@ -287,20 +366,23 @@ func (m *Memory) Deserialize(in io.Reader) error {
return err
}
}

return nil
}

type pageEntry struct {
Index Word `json:"index"`
Data *Page `json:"data"`
Index Word `json:"index"`
Data *[PageSize]byte `json:"data"`
}

func (m *Memory) MarshalJSON() ([]byte, error) { // nosemgrep
pages := make([]pageEntry, 0, len(m.pageTable))
for k, p := range m.pageTable {
data := new([PageSize]byte)
copy(data[:], p.Data)
pages = append(pages, pageEntry{
Index: k,
Data: p.Data,
Data: data,
})
}
sort.Slice(pages, func(i, j int) bool {
Expand All @@ -318,7 +400,8 @@ func (m *Memory) UnmarshalJSON(data []byte) error {
if _, ok := m.pageTable[p.Index]; ok {
return fmt.Errorf("cannot load duplicate page, entry %d, page index %d", i, p.Index)
}
m.AllocPage(p.Index).Data = p.Data
page := m.AllocPage(p.Index)
copy(page.Data, p.Data[:])
}
return nil
}
10 changes: 6 additions & 4 deletions cannon/mipsevm/memory/memory64_benchmark_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,11 @@ import (
)

const (
smallDataset = 12_500_000
mediumDataset = 100_000_000
largeDataset = 400_000_000
smallDataset = 12_500_000
mediumDataset = 100_000_000
largeDataset = 400_000_000
defaultCodeRegionSize = 4096
defaultHeapSize = 4096
)

func BenchmarkMemoryOperations(b *testing.B) {
Expand All @@ -36,7 +38,7 @@ func BenchmarkMemoryOperations(b *testing.B) {
for _, bm := range benchmarks {
b.Run("BinaryTree", func(b *testing.B) {
b.Run(bm.name, func(b *testing.B) {
m := NewBinaryTreeMemory()
m := NewBinaryTreeMemory(defaultCodeRegionSize, defaultHeapSize)
b.ResetTimer()
bm.fn(b, m)
})
Expand Down
Loading