Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 1 addition & 3 deletions rvgo/fast/instrumented.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,9 +55,6 @@ func (m *InstrumentedState) Step(proof bool) (wit *StepWitness, err error) {
}

err = m.riscvStep()
if err != nil {
return nil, err
}

if proof {
wit.MemProof = make([]byte, 0, len(m.memProofs)*memProofSize)
Expand All @@ -70,6 +67,7 @@ func (m *InstrumentedState) Step(proof bool) (wit *StepWitness, err error) {
wit.PreimageValue = m.lastPreimage
}
}

return
}

Expand Down
52 changes: 34 additions & 18 deletions rvgo/fast/vm.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,22 @@ func (e *UnsupportedSyscallErr) Error() string {
return fmt.Sprintf("unsupported system call: %d", e.SyscallNum)
}

// UnrecognizedSyscallErr reports a system call number that the VM does not
// recognize at all (as opposed to UnsupportedSyscallErr, which covers known
// but intentionally unimplemented syscalls).
type UnrecognizedSyscallErr struct {
// SyscallNum is the raw syscall number taken from register a7.
SyscallNum U64
}

// Error implements the error interface, formatting the offending syscall number.
func (e *UnrecognizedSyscallErr) Error() string {
return fmt.Sprintf("unrecognized system call: %d", e.SyscallNum)
}

// UnrecognizedResourceErr reports a getrlimit-style resource identifier that
// the VM does not handle.
type UnrecognizedResourceErr struct {
// Resource is the raw resource identifier passed to the resource-limit syscall.
Resource U64
}

// Error implements the error interface, formatting the unknown resource identifier.
func (e *UnrecognizedResourceErr) Error() string {
return fmt.Sprintf("unrecognized resource limit lookup: %d", e.Resource)
}

// riscvStep runs a single instruction
// Note: errors are only returned in debugging/tooling modes, not in production use.
func (inst *InstrumentedState) riscvStep() (outErr error) {
Expand Down Expand Up @@ -123,7 +139,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {

getRegister := func(reg U64) U64 {
if reg > 31 {
revertWithCode(0xbad4e9, fmt.Errorf("cannot load invalid register: %d", reg))
revertWithCode(riscv.ErrInvalidRegister, fmt.Errorf("cannot load invalid register: %d", reg))
}
//fmt.Printf("load reg %2d: %016x\n", reg, state.Registers[reg])
return s.Registers[reg]
Expand All @@ -150,7 +166,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {

getMemoryB32 := func(addr U64, proofIndex uint8) (out [32]byte) {
if addr&31 != 0 { // quick addr alignment check
revertWithCode(0xbad10ad0, fmt.Errorf("addr %d not aligned with 32 bytes", addr))
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 32 bytes", addr))
}
inst.trackMemAccess(addr, proofIndex)
s.Memory.GetUnaligned(addr, out[:])
Expand All @@ -168,12 +184,12 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
// load unaligned, optionally signed, little-endian, integer of 1 ... 8 bytes from memory
loadMem := func(addr U64, size U64, signed bool, proofIndexL uint8, proofIndexR uint8) (out U64) {
if size > 8 {
revertWithCode(0xbad512e0, fmt.Errorf("cannot load more than 8 bytes: %d", size))
revertWithCode(riscv.ErrLoadExceeds8Bytes, fmt.Errorf("cannot load more than 8 bytes: %d", size))
}
inst.trackMemAccess(addr&^31, proofIndexL)
if (addr+size-1)&^31 != addr&^31 {
if proofIndexR == 0xff {
revertWithCode(0xbad22220, fmt.Errorf("unexpected need for right-side proof %d in loadMem", proofIndexR))
revertWithCode(riscv.ErrUnexpectedRProofLoad, fmt.Errorf("unexpected need for right-side proof %d in loadMem", proofIndexR))
}
inst.trackMemAccess((addr+size-1)&^31, proofIndexR)
}
Expand All @@ -190,7 +206,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {

storeMemUnaligned := func(addr U64, size U64, value U256, proofIndexL uint8, proofIndexR uint8, verifyL bool, verifyR bool) {
if size > 32 {
revertWithCode(0xbad512e1, fmt.Errorf("cannot store more than 32 bytes: %d", size))
revertWithCode(riscv.ErrStoreExceeds32Bytes, fmt.Errorf("cannot store more than 32 bytes: %d", size))
}
var bytez [32]byte
binary.LittleEndian.PutUint64(bytez[:8], value[0])
Expand All @@ -208,7 +224,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
return
}
if proofIndexR == 0xff {
revertWithCode(0xbad22221, fmt.Errorf("unexpected need for right-side proof %d in storeMemUnaligned", proofIndexR))
revertWithCode(riscv.ErrUnexpectedRProofStoreUnaligned, fmt.Errorf("unexpected need for right-side proof %d in storeMemUnaligned", proofIndexR))
}
// if not aligned
rightAddr := leftAddr + 32
Expand All @@ -223,7 +239,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {

storeMem := func(addr U64, size U64, value U64, proofIndexL uint8, proofIndexR uint8, verifyL bool, verifyR bool) {
if size > 8 {
revertWithCode(0xbad512e8, fmt.Errorf("cannot store more than 8 bytes: %d", size))
revertWithCode(riscv.ErrStoreExceeds8Bytes, fmt.Errorf("cannot store more than 8 bytes: %d", size))
}
var bytez [8]byte
binary.LittleEndian.PutUint64(bytez[:], value)
Expand All @@ -238,7 +254,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
}
// if not aligned
if proofIndexR == 0xff {
revertWithCode(0xbad2222f, fmt.Errorf("unexpected need for right-side proof %d in storeMem", proofIndexR))
revertWithCode(riscv.ErrUnexpectedRProofStore, fmt.Errorf("unexpected need for right-side proof %d in storeMem", proofIndexR))
}
rightAddr := leftAddr + 32
leftSize := rightAddr - addr
Expand Down Expand Up @@ -271,7 +287,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
case 3: // ?11 = CSRRC(I)
v = and64(out, not64(v))
default:
revertWithCode(0xbadc0de0, fmt.Errorf("unkwown CSR mode: %d", mode))
revertWithCode(riscv.ErrUnknownCSRMode, fmt.Errorf("unkwown CSR mode: %d", mode))
}
writeCSR(num, v)
return
Expand Down Expand Up @@ -315,7 +331,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {

pdatB32, pdatlen, err := inst.readPreimage(preImageKey, offset) // pdat is left-aligned
if err != nil {
revertWithCode(0xbadf00d0, err)
revertWithCode(riscv.ErrFailToReadPreimage, err)
}
if iszero64(pdatlen) { // EOF
return toU64(0)
Expand Down Expand Up @@ -538,7 +554,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
setRegister(toU64(10), toU64(0))
setRegister(toU64(11), toU64(0))
default:
revertWithCode(0xf0012, fmt.Errorf("unrecognized resource limit lookup: %d", res))
revertWithCode(riscv.ErrUnrecognizedResource, &UnrecognizedResourceErr{Resource: res})
}
case riscv.SysMadvise: // madvise - ignored
setRegister(toU64(10), toU64(0))
Expand Down Expand Up @@ -568,13 +584,13 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
setRegister(toU64(10), toU64(0))
setRegister(toU64(11), toU64(0))
case riscv.SysPrlimit64: // prlimit64 -- unsupported, we have getrlimit, is prlimit64 even called?
revertWithCode(0xf001ca11, &UnsupportedSyscallErr{SyscallNum: a7})
revertWithCode(riscv.ErrInvalidSyscall, &UnsupportedSyscallErr{SyscallNum: a7})
case riscv.SysFutex: // futex - not supported, for now
revertWithCode(0xf001ca11, &UnsupportedSyscallErr{SyscallNum: a7})
revertWithCode(riscv.ErrInvalidSyscall, &UnsupportedSyscallErr{SyscallNum: a7})
case riscv.SysNanosleep: // nanosleep - not supported, for now
revertWithCode(0xf001ca11, &UnsupportedSyscallErr{SyscallNum: a7})
revertWithCode(riscv.ErrInvalidSyscall, &UnsupportedSyscallErr{SyscallNum: a7})
default:
revertWithCode(0xf001ca11, fmt.Errorf("unrecognized system call: %d", a7))
revertWithCode(riscv.ErrInvalidSyscall, &UnrecognizedSyscallErr{SyscallNum: a7})
}
}

Expand Down Expand Up @@ -889,7 +905,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
// 0b011 == RV64A D variants
size := shl64(funct3, toU64(1))
if lt64(size, toU64(4)) != 0 {
revertWithCode(0xbada70, fmt.Errorf("bad AMO size: %d", size))
revertWithCode(riscv.ErrBadAMOSize, fmt.Errorf("bad AMO size: %d", size))
}
addr := getRegister(rs1)
// TODO check if addr is aligned
Expand Down Expand Up @@ -945,7 +961,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
v = value
}
default:
revertWithCode(0xf001a70, fmt.Errorf("unknown atomic operation %d", op))
revertWithCode(riscv.ErrUnknownAtomicOperation, fmt.Errorf("unknown atomic operation %d", op))
}
storeMem(addr, size, v, 1, 3, false, true) // after overwriting 1, proof 2 is no longer valid
setRegister(rd, rdValue)
Expand All @@ -963,7 +979,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
case 0x53: // FADD etc. no-op is enough to pass Go runtime check
setPC(add64(pc, toU64(4))) // no-op this.
default:
revertWithCode(0xf001c0de, fmt.Errorf("unknown instruction opcode: %d", opcode))
revertWithCode(riscv.ErrUnknownOpCode, fmt.Errorf("unknown instruction opcode: %d", opcode))
}
return nil
}
17 changes: 17 additions & 0 deletions rvgo/riscv/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,4 +38,21 @@ const (
FdHintWrite = 4
FdPreimageRead = 5
FdPreimageWrite = 6

ErrUnrecognizedResource = uint64(0xf0012)
ErrUnknownAtomicOperation = uint64(0xf001a70)
ErrUnknownOpCode = uint64(0xf001c0de)
ErrInvalidSyscall = uint64(0xf001ca11)
ErrInvalidRegister = uint64(0xbad4e9)
ErrNotAlignedAddr = uint64(0xbad10ad0)
ErrLoadExceeds8Bytes = uint64(0xbad512e0)
ErrStoreExceeds8Bytes = uint64(0xbad512e8)
ErrStoreExceeds32Bytes = uint64(0xbad512e1)
ErrUnexpectedRProofLoad = uint64(0xbad22220)
ErrUnexpectedRProofStoreUnaligned = uint64(0xbad22221)
ErrUnexpectedRProofStore = uint64(0xbad2222f)
ErrUnknownCSRMode = uint64(0xbadc0de0)
ErrBadAMOSize = uint64(0xbada70)
ErrFailToReadPreimage = uint64(0xbadf00d0)
ErrBadMemoryProof = uint64(0xbadf00d1)
)
63 changes: 42 additions & 21 deletions rvgo/slow/vm.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,15 +60,36 @@ func (e *UnsupportedSyscallErr) Error() string {
return fmt.Sprintf("unsupported system call: %d", e.SyscallNum)
}

// UnrecognizedSyscallErr reports a system call number that the VM does not
// recognize at all (as opposed to UnsupportedSyscallErr, which covers known
// but intentionally unimplemented syscalls).
type UnrecognizedSyscallErr struct {
// SyscallNum is the raw syscall number taken from register a7.
SyscallNum U64
}

// Error implements the error interface, formatting the offending syscall number.
func (e *UnrecognizedSyscallErr) Error() string {
return fmt.Sprintf("unrecognized system call: %d", e.SyscallNum)
}

// UnrecognizedResourceErr reports a getrlimit-style resource identifier that
// the VM does not handle.
type UnrecognizedResourceErr struct {
// Resource is the raw resource identifier passed to the resource-limit syscall.
Resource U64
}

// Error implements the error interface, formatting the unknown resource identifier.
func (e *UnrecognizedResourceErr) Error() string {
return fmt.Sprintf("unrecognized resource limit lookup: %d", e.Resource)
}

type PreimageOracle interface {
ReadPreimagePart(key [32]byte, offset uint64) (dat [32]byte, datlen uint8, err error)
}

func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr error) {
var revertCode uint64
defer func() {
if err := recover(); err != nil {
outErr = fmt.Errorf("revert: %v", err)
if errInterface := recover(); errInterface != nil {
if err, ok := errInterface.(error); ok {
outErr = fmt.Errorf("revert: %w", err)
} else {
outErr = fmt.Errorf("revert: %v", err)
}

}
if revertCode != 0 {
outErr = fmt.Errorf("revert %x: %w", revertCode, outErr)
Expand Down Expand Up @@ -196,7 +217,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err

getRegister := func(reg U64) U64 {
if gt64(reg, toU64(31)) != (U64{}) {
revertWithCode(0xbad4e9, fmt.Errorf("cannot load invalid register: %d", reg.val()))
revertWithCode(riscv.ErrInvalidRegister, fmt.Errorf("cannot load invalid register: %d", reg.val()))
}
//fmt.Printf("load reg %2d: %016x\n", reg, state.Registers[reg])
offset := add64(toU64(stateOffsetRegisters), mul64(reg, toU64(8)))
Expand All @@ -209,7 +230,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
return
}
if gt64(reg, toU64(31)) != (U64{}) {
revertWithCode(0xbad4e9, fmt.Errorf("unknown register %d, cannot write %x", reg.val(), v.val()))
revertWithCode(riscv.ErrInvalidRegister, fmt.Errorf("unknown register %d, cannot write %x", reg.val(), v.val()))
}
offset := add64(toU64(stateOffsetRegisters), mul64(reg, toU64(8)))
writeState(offset.val(), 8, encodeU64BE(v))
Expand Down Expand Up @@ -261,7 +282,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err

getMemoryB32 := func(addr U64, proofIndex uint8) (out [32]byte) {
if and64(addr, toU64(31)) != (U64{}) { // quick addr alignment check
revertWithCode(0xbad10ad0, fmt.Errorf("addr %d not aligned with 32 bytes", addr))
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 32 bytes", addr))
}
offset := proofOffset(proofIndex)
leaf := calldataload(offset)
Expand All @@ -281,7 +302,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
}
memRoot := getMemRoot()
if iszero(eq(b32asBEWord(node), b32asBEWord(memRoot))) { // verify the root matches
revertWithCode(0xbadf00d1, fmt.Errorf("bad memory proof, got mem root: %x, expected %x", node, memRoot))
revertWithCode(riscv.ErrBadMemoryProof, fmt.Errorf("bad memory proof, got mem root: %x, expected %x", node, memRoot))
}
out = leaf
return
Expand All @@ -291,7 +312,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
// it assumes the same memory proof has been verified with getMemoryB32
setMemoryB32 := func(addr U64, v [32]byte, proofIndex uint8) {
if and64(addr, toU64(31)) != (U64{}) {
revertWithCode(0xbad10ad0, fmt.Errorf("addr %d not aligned with 32 bytes", addr))
revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 32 bytes", addr))
}
offset := proofOffset(proofIndex)
leaf := v
Expand All @@ -315,7 +336,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
// load unaligned, optionally signed, little-endian, integer of 1 ... 8 bytes from memory
loadMem := func(addr U64, size U64, signed bool, proofIndexL uint8, proofIndexR uint8) (out U64) {
if size.val() > 8 {
revertWithCode(0xbad512e0, fmt.Errorf("cannot load more than 8 bytes: %d", size))
revertWithCode(riscv.ErrLoadExceeds8Bytes, fmt.Errorf("cannot load more than 8 bytes: %d", size))
}
// load/verify left part
leftAddr := and64(addr, not64(toU64(31)))
Expand All @@ -329,7 +350,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
if iszero64(eq64(leftAddr, rightAddr)) {
// if unaligned, use second proof for the right part
if proofIndexR == 0xff {
revertWithCode(0xbad22220, fmt.Errorf("unexpected need for right-side proof %d in loadMem", proofIndexR))
revertWithCode(riscv.ErrUnexpectedRProofLoad, fmt.Errorf("unexpected need for right-side proof %d in loadMem", proofIndexR))
}
// load/verify right part
right = b32asBEWord(getMemoryB32(rightAddr, proofIndexR))
Expand Down Expand Up @@ -400,7 +421,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err

storeMemUnaligned := func(addr U64, size U64, value U256, proofIndexL uint8, proofIndexR uint8) {
if size.val() > 32 {
revertWithCode(0xbad512e1, fmt.Errorf("cannot store more than 32 bytes: %d", size))
revertWithCode(riscv.ErrStoreExceeds32Bytes, fmt.Errorf("cannot store more than 32 bytes: %d", size))
}

leftAddr := and64(addr, not64(toU64(31)))
Expand All @@ -420,7 +441,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
return
}
if proofIndexR == 0xff {
revertWithCode(0xbad22221, fmt.Errorf("unexpected need for right-side proof %d in storeMemUnaligned", proofIndexR))
revertWithCode(riscv.ErrUnexpectedRProofStoreUnaligned, fmt.Errorf("unexpected need for right-side proof %d in storeMemUnaligned", proofIndexR))
}
// load the right base (with updated mem root)
right := b32asBEWord(getMemoryB32(rightAddr, proofIndexR))
Expand Down Expand Up @@ -454,7 +475,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
case 3: // ?11 = CSRRC(I)
v = and64(out, not64(v))
default:
revertWithCode(0xbadc0de0, fmt.Errorf("unkwown CSR mode: %d", mode.val()))
revertWithCode(riscv.ErrUnknownCSRMode, fmt.Errorf("unkwown CSR mode: %d", mode.val()))
}
writeCSR(num, v)
return
Expand Down Expand Up @@ -500,7 +521,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
datlen = toU64(l)
return
}
revertWithCode(0xbadf00d0, err)
revertWithCode(riscv.ErrFailToReadPreimage, err)
return
}

Expand Down Expand Up @@ -713,7 +734,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
setRegister(toU64(10), toU64(0))
setRegister(toU64(11), toU64(0))
default:
revertWithCode(0xf0012, fmt.Errorf("unrecognized resource limit lookup: %d", res))
revertWithCode(riscv.ErrUnrecognizedResource, &UnrecognizedResourceErr{Resource: res})
}
case riscv.SysMadvise: // madvise - ignored
setRegister(toU64(10), toU64(0))
Expand Down Expand Up @@ -743,13 +764,13 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
setRegister(toU64(10), toU64(0))
setRegister(toU64(11), toU64(0))
case riscv.SysPrlimit64: // prlimit64 -- unsupported, we have getrlimit, is prlimit64 even called?
revertWithCode(0xf001ca11, &UnsupportedSyscallErr{SyscallNum: a7})
revertWithCode(riscv.ErrInvalidSyscall, &UnsupportedSyscallErr{SyscallNum: a7})
case riscv.SysFutex: // futex - not supported, for now
revertWithCode(0xf001ca11, &UnsupportedSyscallErr{SyscallNum: a7})
revertWithCode(riscv.ErrInvalidSyscall, &UnsupportedSyscallErr{SyscallNum: a7})
case riscv.SysNanosleep: // nanosleep - not supported, for now
revertWithCode(0xf001ca11, &UnsupportedSyscallErr{SyscallNum: a7})
revertWithCode(riscv.ErrInvalidSyscall, &UnsupportedSyscallErr{SyscallNum: a7})
default:
revertWithCode(0xf001ca11, fmt.Errorf("unrecognized system call: %d", a7))
revertWithCode(riscv.ErrInvalidSyscall, &UnrecognizedSyscallErr{SyscallNum: a7})
}
}

Expand Down Expand Up @@ -1064,7 +1085,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
// 0b011 == RV64A D variants
size := shl64(funct3, toU64(1))
if lt64(size, toU64(4)) != (U64{}) {
revertWithCode(0xbada70, fmt.Errorf("bad AMO size: %d", size))
revertWithCode(riscv.ErrBadAMOSize, fmt.Errorf("bad AMO size: %d", size))
}
addr := getRegister(rs1)
// TODO check if addr is aligned
Expand Down Expand Up @@ -1120,7 +1141,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
v = value
}
default:
revertWithCode(0xf001a70, fmt.Errorf("unknown atomic operation %d", op))
revertWithCode(riscv.ErrUnknownAtomicOperation, fmt.Errorf("unknown atomic operation %d", op))
}
storeMem(addr, size, v, 1, 3) // after overwriting 1, proof 2 is no longer valid
setRegister(rd, rdValue)
Expand All @@ -1138,7 +1159,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
case 0x53: // FADD etc. no-op is enough to pass Go runtime check
setPC(add64(pc, toU64(4))) // no-op this.
default:
revertWithCode(0xf001c0de, fmt.Errorf("unknown instruction opcode: %d", opcode))
revertWithCode(riscv.ErrUnknownOpCode, fmt.Errorf("unknown instruction opcode: %d", opcode))
}
return computeStateHash(), nil
}
Loading