diff --git a/README.md b/README.md
index 78a56baece..639286ba9f 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,7 @@ https://pkg.go.dev/badge/github.com/ethereum/go-ethereum
[](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
[](https://app.travis-ci.com/github/ethereum/go-ethereum)
[](https://discord.gg/nthXNEv)
+[](https://x.com/go_ethereum)
Automated builds are available for stable releases and the unstable master branch. Binary
archives are published at https://geth.ethereum.org/downloads/.
diff --git a/accounts/abi/abigen/source2.go.tpl b/accounts/abi/abigen/source2.go.tpl
index 8ef906b8d6..3d98cbb700 100644
--- a/accounts/abi/abigen/source2.go.tpl
+++ b/accounts/abi/abigen/source2.go.tpl
@@ -183,7 +183,7 @@ var (
// Solidity: {{.Original.String}}
func ({{ decapitalise $contract.Type}} *{{$contract.Type}}) Unpack{{.Normalized.Name}}Event(log *types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
event := "{{.Original.Name}}"
- if log.Topics[0] != {{ decapitalise $contract.Type}}.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != {{ decapitalise $contract.Type}}.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new({{$contract.Type}}{{.Normalized.Name}})
diff --git a/accounts/abi/abigen/testdata/v2/crowdsale.go.txt b/accounts/abi/abigen/testdata/v2/crowdsale.go.txt
index b2183c91ea..b548b6cdae 100644
--- a/accounts/abi/abigen/testdata/v2/crowdsale.go.txt
+++ b/accounts/abi/abigen/testdata/v2/crowdsale.go.txt
@@ -360,7 +360,7 @@ func (CrowdsaleFundTransfer) ContractEventName() string {
// Solidity: event FundTransfer(address backer, uint256 amount, bool isContribution)
func (crowdsale *Crowdsale) UnpackFundTransferEvent(log *types.Log) (*CrowdsaleFundTransfer, error) {
event := "FundTransfer"
- if log.Topics[0] != crowdsale.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != crowdsale.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(CrowdsaleFundTransfer)
diff --git a/accounts/abi/abigen/testdata/v2/dao.go.txt b/accounts/abi/abigen/testdata/v2/dao.go.txt
index 75fa95df91..c246771d6d 100644
--- a/accounts/abi/abigen/testdata/v2/dao.go.txt
+++ b/accounts/abi/abigen/testdata/v2/dao.go.txt
@@ -606,7 +606,7 @@ func (DAOChangeOfRules) ContractEventName() string {
// Solidity: event ChangeOfRules(uint256 minimumQuorum, uint256 debatingPeriodInMinutes, int256 majorityMargin)
func (dAO *DAO) UnpackChangeOfRulesEvent(log *types.Log) (*DAOChangeOfRules, error) {
event := "ChangeOfRules"
- if log.Topics[0] != dAO.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != dAO.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(DAOChangeOfRules)
@@ -648,7 +648,7 @@ func (DAOMembershipChanged) ContractEventName() string {
// Solidity: event MembershipChanged(address member, bool isMember)
func (dAO *DAO) UnpackMembershipChangedEvent(log *types.Log) (*DAOMembershipChanged, error) {
event := "MembershipChanged"
- if log.Topics[0] != dAO.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != dAO.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(DAOMembershipChanged)
@@ -692,7 +692,7 @@ func (DAOProposalAdded) ContractEventName() string {
// Solidity: event ProposalAdded(uint256 proposalID, address recipient, uint256 amount, string description)
func (dAO *DAO) UnpackProposalAddedEvent(log *types.Log) (*DAOProposalAdded, error) {
event := "ProposalAdded"
- if log.Topics[0] != dAO.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != dAO.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(DAOProposalAdded)
@@ -736,7 +736,7 @@ func (DAOProposalTallied) ContractEventName() string {
// Solidity: event ProposalTallied(uint256 proposalID, int256 result, uint256 quorum, bool active)
func (dAO *DAO) UnpackProposalTalliedEvent(log *types.Log) (*DAOProposalTallied, error) {
event := "ProposalTallied"
- if log.Topics[0] != dAO.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != dAO.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(DAOProposalTallied)
@@ -780,7 +780,7 @@ func (DAOVoted) ContractEventName() string {
// Solidity: event Voted(uint256 proposalID, bool position, address voter, string justification)
func (dAO *DAO) UnpackVotedEvent(log *types.Log) (*DAOVoted, error) {
event := "Voted"
- if log.Topics[0] != dAO.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != dAO.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(DAOVoted)
diff --git a/accounts/abi/abigen/testdata/v2/eventchecker.go.txt b/accounts/abi/abigen/testdata/v2/eventchecker.go.txt
index 92558c5efe..8ad59e63b1 100644
--- a/accounts/abi/abigen/testdata/v2/eventchecker.go.txt
+++ b/accounts/abi/abigen/testdata/v2/eventchecker.go.txt
@@ -72,7 +72,7 @@ func (EventCheckerDynamic) ContractEventName() string {
// Solidity: event dynamic(string indexed idxStr, bytes indexed idxDat, string str, bytes dat)
func (eventChecker *EventChecker) UnpackDynamicEvent(log *types.Log) (*EventCheckerDynamic, error) {
event := "dynamic"
- if log.Topics[0] != eventChecker.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != eventChecker.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(EventCheckerDynamic)
@@ -112,7 +112,7 @@ func (EventCheckerEmpty) ContractEventName() string {
// Solidity: event empty()
func (eventChecker *EventChecker) UnpackEmptyEvent(log *types.Log) (*EventCheckerEmpty, error) {
event := "empty"
- if log.Topics[0] != eventChecker.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != eventChecker.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(EventCheckerEmpty)
@@ -154,7 +154,7 @@ func (EventCheckerIndexed) ContractEventName() string {
// Solidity: event indexed(address indexed addr, int256 indexed num)
func (eventChecker *EventChecker) UnpackIndexedEvent(log *types.Log) (*EventCheckerIndexed, error) {
event := "indexed"
- if log.Topics[0] != eventChecker.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != eventChecker.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(EventCheckerIndexed)
@@ -196,7 +196,7 @@ func (EventCheckerMixed) ContractEventName() string {
// Solidity: event mixed(address indexed addr, int256 num)
func (eventChecker *EventChecker) UnpackMixedEvent(log *types.Log) (*EventCheckerMixed, error) {
event := "mixed"
- if log.Topics[0] != eventChecker.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != eventChecker.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(EventCheckerMixed)
@@ -238,7 +238,7 @@ func (EventCheckerUnnamed) ContractEventName() string {
// Solidity: event unnamed(uint256 indexed arg0, uint256 indexed arg1)
func (eventChecker *EventChecker) UnpackUnnamedEvent(log *types.Log) (*EventCheckerUnnamed, error) {
event := "unnamed"
- if log.Topics[0] != eventChecker.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != eventChecker.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(EventCheckerUnnamed)
diff --git a/accounts/abi/abigen/testdata/v2/nameconflict.go.txt b/accounts/abi/abigen/testdata/v2/nameconflict.go.txt
index fbc61a5c6c..3fbabee5a5 100644
--- a/accounts/abi/abigen/testdata/v2/nameconflict.go.txt
+++ b/accounts/abi/abigen/testdata/v2/nameconflict.go.txt
@@ -134,7 +134,7 @@ func (NameConflictLog) ContractEventName() string {
// Solidity: event log(int256 msg, int256 _msg)
func (nameConflict *NameConflict) UnpackLogEvent(log *types.Log) (*NameConflictLog, error) {
event := "log"
- if log.Topics[0] != nameConflict.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != nameConflict.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(NameConflictLog)
diff --git a/accounts/abi/abigen/testdata/v2/numericmethodname.go.txt b/accounts/abi/abigen/testdata/v2/numericmethodname.go.txt
index 9d698a2657..d962583e48 100644
--- a/accounts/abi/abigen/testdata/v2/numericmethodname.go.txt
+++ b/accounts/abi/abigen/testdata/v2/numericmethodname.go.txt
@@ -136,7 +136,7 @@ func (NumericMethodNameE1TestEvent) ContractEventName() string {
// Solidity: event _1TestEvent(address _param)
func (numericMethodName *NumericMethodName) UnpackE1TestEventEvent(log *types.Log) (*NumericMethodNameE1TestEvent, error) {
event := "_1TestEvent"
- if log.Topics[0] != numericMethodName.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != numericMethodName.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(NumericMethodNameE1TestEvent)
diff --git a/accounts/abi/abigen/testdata/v2/overload.go.txt b/accounts/abi/abigen/testdata/v2/overload.go.txt
index 3b9a95a125..ddddd10186 100644
--- a/accounts/abi/abigen/testdata/v2/overload.go.txt
+++ b/accounts/abi/abigen/testdata/v2/overload.go.txt
@@ -114,7 +114,7 @@ func (OverloadBar) ContractEventName() string {
// Solidity: event bar(uint256 i)
func (overload *Overload) UnpackBarEvent(log *types.Log) (*OverloadBar, error) {
event := "bar"
- if log.Topics[0] != overload.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != overload.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(OverloadBar)
@@ -156,7 +156,7 @@ func (OverloadBar0) ContractEventName() string {
// Solidity: event bar(uint256 i, uint256 j)
func (overload *Overload) UnpackBar0Event(log *types.Log) (*OverloadBar0, error) {
event := "bar0"
- if log.Topics[0] != overload.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != overload.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(OverloadBar0)
diff --git a/accounts/abi/abigen/testdata/v2/token.go.txt b/accounts/abi/abigen/testdata/v2/token.go.txt
index 69294f375a..6ebc96861b 100644
--- a/accounts/abi/abigen/testdata/v2/token.go.txt
+++ b/accounts/abi/abigen/testdata/v2/token.go.txt
@@ -386,7 +386,7 @@ func (TokenTransfer) ContractEventName() string {
// Solidity: event Transfer(address indexed from, address indexed to, uint256 value)
func (token *Token) UnpackTransferEvent(log *types.Log) (*TokenTransfer, error) {
event := "Transfer"
- if log.Topics[0] != token.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != token.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(TokenTransfer)
diff --git a/accounts/abi/abigen/testdata/v2/tuple.go.txt b/accounts/abi/abigen/testdata/v2/tuple.go.txt
index 76a1f58d52..4724fdd351 100644
--- a/accounts/abi/abigen/testdata/v2/tuple.go.txt
+++ b/accounts/abi/abigen/testdata/v2/tuple.go.txt
@@ -193,7 +193,7 @@ func (TupleTupleEvent) ContractEventName() string {
// Solidity: event TupleEvent((uint256,uint256[],(uint256,uint256)[]) a, (uint256,uint256)[2][] b, (uint256,uint256)[][2] c, (uint256,uint256[],(uint256,uint256)[])[] d, uint256[] e)
func (tuple *Tuple) UnpackTupleEventEvent(log *types.Log) (*TupleTupleEvent, error) {
event := "TupleEvent"
- if log.Topics[0] != tuple.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != tuple.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(TupleTupleEvent)
@@ -234,7 +234,7 @@ func (TupleTupleEvent2) ContractEventName() string {
// Solidity: event TupleEvent2((uint8,uint8)[] arg0)
func (tuple *Tuple) UnpackTupleEvent2Event(log *types.Log) (*TupleTupleEvent2, error) {
event := "TupleEvent2"
- if log.Topics[0] != tuple.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != tuple.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(TupleTupleEvent2)
diff --git a/accounts/abi/bind/v2/internal/contracts/db/bindings.go b/accounts/abi/bind/v2/internal/contracts/db/bindings.go
index 4680adf283..34e827489e 100644
--- a/accounts/abi/bind/v2/internal/contracts/db/bindings.go
+++ b/accounts/abi/bind/v2/internal/contracts/db/bindings.go
@@ -276,7 +276,7 @@ func (DBInsert) ContractEventName() string {
// Solidity: event Insert(uint256 key, uint256 value, uint256 length)
func (dB *DB) UnpackInsertEvent(log *types.Log) (*DBInsert, error) {
event := "Insert"
- if log.Topics[0] != dB.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != dB.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(DBInsert)
@@ -318,7 +318,7 @@ func (DBKeyedInsert) ContractEventName() string {
// Solidity: event KeyedInsert(uint256 indexed key, uint256 value)
func (dB *DB) UnpackKeyedInsertEvent(log *types.Log) (*DBKeyedInsert, error) {
event := "KeyedInsert"
- if log.Topics[0] != dB.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != dB.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(DBKeyedInsert)
diff --git a/accounts/abi/bind/v2/internal/contracts/events/bindings.go b/accounts/abi/bind/v2/internal/contracts/events/bindings.go
index 13af0fbf4a..f4f9ad38fa 100644
--- a/accounts/abi/bind/v2/internal/contracts/events/bindings.go
+++ b/accounts/abi/bind/v2/internal/contracts/events/bindings.go
@@ -115,7 +115,7 @@ func (CBasic1) ContractEventName() string {
// Solidity: event basic1(uint256 indexed id, uint256 data)
func (c *C) UnpackBasic1Event(log *types.Log) (*CBasic1, error) {
event := "basic1"
- if log.Topics[0] != c.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != c.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(CBasic1)
@@ -157,7 +157,7 @@ func (CBasic2) ContractEventName() string {
// Solidity: event basic2(bool indexed flag, uint256 data)
func (c *C) UnpackBasic2Event(log *types.Log) (*CBasic2, error) {
event := "basic2"
- if log.Topics[0] != c.abi.Events[event].ID {
+ if len(log.Topics) == 0 || log.Topics[0] != c.abi.Events[event].ID {
return nil, errors.New("event signature mismatch")
}
out := new(CBasic2)
diff --git a/accounts/abi/bind/v2/lib_test.go b/accounts/abi/bind/v2/lib_test.go
index ee1db9cf86..11360fc7dd 100644
--- a/accounts/abi/bind/v2/lib_test.go
+++ b/accounts/abi/bind/v2/lib_test.go
@@ -367,3 +367,28 @@ func TestErrors(t *testing.T) {
t.Fatalf("bad unpacked error result: expected Arg4 to be false. got true")
}
}
+
+func TestEventUnpackEmptyTopics(t *testing.T) {
+ c := events.NewC()
+
+ for _, log := range []*types.Log{
+ {Topics: []common.Hash{}},
+ {Topics: nil},
+ } {
+ _, err := c.UnpackBasic1Event(log)
+ if err == nil {
+ t.Fatal("expected error when unpacking event with empty topics, got nil")
+ }
+ if err.Error() != "event signature mismatch" {
+ t.Fatalf("expected 'event signature mismatch' error, got: %v", err)
+ }
+
+ _, err = c.UnpackBasic2Event(log)
+ if err == nil {
+ t.Fatal("expected error when unpacking event with empty topics, got nil")
+ }
+ if err.Error() != "event signature mismatch" {
+ t.Fatalf("expected 'event signature mismatch' error, got: %v", err)
+ }
+ }
+}
diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go
index 729ca93c54..f6696ea978 100644
--- a/accounts/abi/reflect.go
+++ b/accounts/abi/reflect.go
@@ -53,7 +53,7 @@ func ConvertType(in interface{}, proto interface{}) interface{} {
// indirect recursively dereferences the value until it either gets the value
// or finds a big.Int
func indirect(v reflect.Value) reflect.Value {
- if v.Kind() == reflect.Ptr && v.Elem().Type() != reflect.TypeOf(big.Int{}) {
+ if v.Kind() == reflect.Ptr && v.Elem().Type() != reflect.TypeFor[big.Int]() {
return indirect(v.Elem())
}
return v
@@ -65,32 +65,32 @@ func reflectIntType(unsigned bool, size int) reflect.Type {
if unsigned {
switch size {
case 8:
- return reflect.TypeOf(uint8(0))
+ return reflect.TypeFor[uint8]()
case 16:
- return reflect.TypeOf(uint16(0))
+ return reflect.TypeFor[uint16]()
case 32:
- return reflect.TypeOf(uint32(0))
+ return reflect.TypeFor[uint32]()
case 64:
- return reflect.TypeOf(uint64(0))
+ return reflect.TypeFor[uint64]()
}
}
switch size {
case 8:
- return reflect.TypeOf(int8(0))
+ return reflect.TypeFor[int8]()
case 16:
- return reflect.TypeOf(int16(0))
+ return reflect.TypeFor[int16]()
case 32:
- return reflect.TypeOf(int32(0))
+ return reflect.TypeFor[int32]()
case 64:
- return reflect.TypeOf(int64(0))
+ return reflect.TypeFor[int64]()
}
- return reflect.TypeOf(&big.Int{})
+ return reflect.TypeFor[*big.Int]()
}
// mustArrayToByteSlice creates a new byte slice with the exact same size as value
// and copies the bytes in value to the new slice.
func mustArrayToByteSlice(value reflect.Value) reflect.Value {
- slice := reflect.MakeSlice(reflect.TypeOf([]byte{}), value.Len(), value.Len())
+ slice := reflect.ValueOf(make([]byte, value.Len()))
reflect.Copy(slice, value)
return slice
}
@@ -104,7 +104,7 @@ func set(dst, src reflect.Value) error {
switch {
case dstType.Kind() == reflect.Interface && dst.Elem().IsValid() && (dst.Elem().Type().Kind() == reflect.Ptr || dst.Elem().CanSet()):
return set(dst.Elem(), src)
- case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}):
+ case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeFor[big.Int]():
return set(dst.Elem(), src)
case srcType.AssignableTo(dstType) && dst.CanSet():
dst.Set(src)
diff --git a/accounts/abi/reflect_test.go b/accounts/abi/reflect_test.go
index 577fa6ca71..f5e509c52f 100644
--- a/accounts/abi/reflect_test.go
+++ b/accounts/abi/reflect_test.go
@@ -204,12 +204,12 @@ func TestConvertType(t *testing.T) {
var fields []reflect.StructField
fields = append(fields, reflect.StructField{
Name: "X",
- Type: reflect.TypeOf(new(big.Int)),
+ Type: reflect.TypeFor[*big.Int](),
Tag: "json:\"" + "x" + "\"",
})
fields = append(fields, reflect.StructField{
Name: "Y",
- Type: reflect.TypeOf(new(big.Int)),
+ Type: reflect.TypeFor[*big.Int](),
Tag: "json:\"" + "y" + "\"",
})
val := reflect.New(reflect.StructOf(fields))
diff --git a/accounts/abi/type.go b/accounts/abi/type.go
index e59456f15a..2fd11ac123 100644
--- a/accounts/abi/type.go
+++ b/accounts/abi/type.go
@@ -238,9 +238,9 @@ func (t Type) GetType() reflect.Type {
case UintTy:
return reflectIntType(true, t.Size)
case BoolTy:
- return reflect.TypeOf(false)
+ return reflect.TypeFor[bool]()
case StringTy:
- return reflect.TypeOf("")
+ return reflect.TypeFor[string]()
case SliceTy:
return reflect.SliceOf(t.Elem.GetType())
case ArrayTy:
@@ -248,19 +248,15 @@ func (t Type) GetType() reflect.Type {
case TupleTy:
return t.TupleType
case AddressTy:
- return reflect.TypeOf(common.Address{})
+ return reflect.TypeFor[common.Address]()
case FixedBytesTy:
- return reflect.ArrayOf(t.Size, reflect.TypeOf(byte(0)))
+ return reflect.ArrayOf(t.Size, reflect.TypeFor[byte]())
case BytesTy:
- return reflect.SliceOf(reflect.TypeOf(byte(0)))
- case HashTy:
- // hashtype currently not used
- return reflect.ArrayOf(32, reflect.TypeOf(byte(0)))
- case FixedPointTy:
- // fixedpoint type currently not used
- return reflect.ArrayOf(32, reflect.TypeOf(byte(0)))
+ return reflect.TypeFor[[]byte]()
+ case HashTy, FixedPointTy: // currently not used
+ return reflect.TypeFor[[32]byte]()
case FunctionTy:
- return reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
+ return reflect.TypeFor[[24]byte]()
default:
panic("Invalid type")
}
diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go
index 17b077d387..c750f5da5d 100644
--- a/accounts/keystore/keystore.go
+++ b/accounts/keystore/keystore.go
@@ -50,7 +50,7 @@ var (
)
// KeyStoreType is the reflect type of a keystore backend.
-var KeyStoreType = reflect.TypeOf(&KeyStore{})
+var KeyStoreType = reflect.TypeFor[*KeyStore]()
// KeyStoreScheme is the protocol scheme prefixing account and wallet URLs.
const KeyStoreScheme = "keystore"
diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go
index 59176e6591..08b3b6173e 100644
--- a/accounts/scwallet/wallet.go
+++ b/accounts/scwallet/wallet.go
@@ -472,6 +472,11 @@ func (w *Wallet) selfDerive() {
continue
}
pairing := w.Hub.pairing(w)
+ if pairing == nil {
+ w.lock.Unlock()
+ reqc <- struct{}{}
+ continue
+ }
// Device lock obtained, derive the next batch of accounts
var (
@@ -631,13 +636,13 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
}
if pin {
- pairing := w.Hub.pairing(w)
- pairing.Accounts[account.Address] = path
- if err := w.Hub.setPairing(w, pairing); err != nil {
- return accounts.Account{}, err
+ if pairing := w.Hub.pairing(w); pairing != nil {
+ pairing.Accounts[account.Address] = path
+ if err := w.Hub.setPairing(w, pairing); err != nil {
+ return accounts.Account{}, err
+ }
}
}
-
return account, nil
}
@@ -774,11 +779,11 @@ func (w *Wallet) SignTxWithPassphrase(account accounts.Account, passphrase strin
// It first checks for the address in the list of pinned accounts, and if it is
// not found, attempts to parse the derivation path from the account's URL.
func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationPath, error) {
- pairing := w.Hub.pairing(w)
- if path, ok := pairing.Accounts[account.Address]; ok {
- return path, nil
+ if pairing := w.Hub.pairing(w); pairing != nil {
+ if path, ok := pairing.Accounts[account.Address]; ok {
+ return path, nil
+ }
}
-
// Look for the path in the URL
if account.URL.Scheme != w.Hub.scheme {
return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go
index ace313dcaf..16b41c75ac 100644
--- a/accounts/usbwallet/ledger.go
+++ b/accounts/usbwallet/ledger.go
@@ -166,7 +166,7 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio
return common.Address{}, nil, accounts.ErrWalletClosed
}
// Ensure the wallet is capable of signing the given transaction
- if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
+ if chainID != nil && (w.version[0] < 1 || (w.version[0] == 1 && w.version[1] == 0 && w.version[2] < 3)) {
//lint:ignore ST1005 brand name displayed on the console
return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
}
diff --git a/beacon/engine/types.go b/beacon/engine/types.go
index 3c708d17db..b165686fcd 100644
--- a/beacon/engine/types.go
+++ b/beacon/engine/types.go
@@ -60,7 +60,7 @@ type PayloadAttributes struct {
// and contains encoded EIP-1559 parameters. See:
// https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#eip1559params-encoding
EIP1559Params []byte `json:"eip1559Params,omitempty" gencodec:"optional"`
- // MinBaseFee is a field for rollups implementing the Jovian upgrade's minimum base fee feature.
+ // MinBaseFee is a field for rollups implementing the minimum base fee feature.
// See https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/jovian/exec-engine.md#minimum-base-fee-in-payloadattributesv3
MinBaseFee *uint64 `json:"minBaseFee,omitempty" gencodec:"optional"`
}
diff --git a/beacon/merkle/merkle.go b/beacon/merkle/merkle.go
index 30896f9b01..64dfadfab5 100644
--- a/beacon/merkle/merkle.go
+++ b/beacon/merkle/merkle.go
@@ -32,7 +32,7 @@ type Value [32]byte
// Values represent a series of merkle tree leaves/nodes.
type Values []Value
-var valueT = reflect.TypeOf(Value{})
+var valueT = reflect.TypeFor[Value]()
// UnmarshalJSON parses a merkle value in hex syntax.
func (m *Value) UnmarshalJSON(input []byte) error {
diff --git a/build/checksums.txt b/build/checksums.txt
index 7641b9ae62..ab0f7547f6 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -5,54 +5,54 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/fusaka-devnet-3%40v1.0.0
576261e1280e5300c458aa9b05eccb2fec5ff80a0005940dc52fa03fdd907249 fixtures_fusaka-devnet-3.tar.gz
-# version:golang 1.24.4
+# version:golang 1.25.0
# https://go.dev/dl/
-5a86a83a31f9fa81490b8c5420ac384fd3d95a3e71fba665c7b3f95d1dfef2b4 go1.24.4.src.tar.gz
-0d2af78e3b6e08f8013dbbdb26ae33052697b6b72e03ec17d496739c2a1aed68 go1.24.4.aix-ppc64.tar.gz
-69bef555e114b4a2252452b6e7049afc31fbdf2d39790b669165e89525cd3f5c go1.24.4.darwin-amd64.tar.gz
-c4d74453a26f488bdb4b0294da4840d9020806de4661785334eb6d1803ee5c27 go1.24.4.darwin-amd64.pkg
-27973684b515eaf461065054e6b572d9390c05e69ba4a423076c160165336470 go1.24.4.darwin-arm64.tar.gz
-2fe1f8746745c4bfebd494583aaef24cad42594f6d25ed67856879d567ee66e7 go1.24.4.darwin-arm64.pkg
-70b2de9c1cafe5af7be3eb8f80753cce0501ef300db3f3bd59be7ccc464234e1 go1.24.4.dragonfly-amd64.tar.gz
-8d529839db29ee171505b89dc9c3de76003a4ab56202d84bddbbecacbfb6d7c9 go1.24.4.freebsd-386.tar.gz
-6cbc3ad6cc21bdcc7283824d3ac0e85512c02022f6a35eb2e844882ea6e8448c go1.24.4.freebsd-amd64.tar.gz
-d49ae050c20aff646a7641dd903f03eb674570790b90ffb298076c4d41e36655 go1.24.4.freebsd-arm.tar.gz
-e31924abef2a28456b7103c0a5d333dcc11ecf19e76d5de1a383ad5fe0b42457 go1.24.4.freebsd-arm64.tar.gz
-b5bca135eae8ebddf22972611ac1c58ae9fbb5979fd953cc5245c5b1b2517546 go1.24.4.freebsd-riscv64.tar.gz
-7d5efda511ff7e3114b130acee5d0bffbb078fedbfa9b2c1b6a807107e1ca23a go1.24.4.illumos-amd64.tar.gz
-130c9b061082eca15513e595e9952a2ded32e737e609dd0e49f7dfa74eba026d go1.24.4.linux-386.tar.gz
-77e5da33bb72aeaef1ba4418b6fe511bc4d041873cbf82e5aa6318740df98717 go1.24.4.linux-amd64.tar.gz
-d5501ee5aca0f258d5fe9bfaed401958445014495dc115f202d43d5210b45241 go1.24.4.linux-arm64.tar.gz
-6a554e32301cecae3162677e66d4264b81b3b1a89592dd1b7b5c552c7a49fe37 go1.24.4.linux-armv6l.tar.gz
-b208eb25fe244408cbe269ed426454bc46e59d0e0a749b6240d39e884e969875 go1.24.4.linux-loong64.tar.gz
-fddfcb28fd36fe63d2ae181026798f86f3bbd3a7bb0f1e1f617dd3d604bf3fe4 go1.24.4.linux-mips.tar.gz
-7934b924d5ab8c8ae3134a09a6ae74d3c39f63f6c4322ec289364dbbf0bac3ca go1.24.4.linux-mips64.tar.gz
-fa763d8673f94d6e534bb72c3cf675d4c2b8da4a6da42a89f08c5586106db39c go1.24.4.linux-mips64le.tar.gz
-84363dbfe49b41d43df84420a09bd53a4770053d63bfa509868c46a5f8eb3ff7 go1.24.4.linux-mipsle.tar.gz
-28fcbd5d3b56493606873c33f2b4bdd84ba93c633f37313613b5a1e6495c6fe5 go1.24.4.linux-ppc64.tar.gz
-9ca4afef813a2578c23843b640ae0290aa54b2e3c950a6cc4c99e16a57dec2ec go1.24.4.linux-ppc64le.tar.gz
-1d7034f98662d8f2c8abd7c700ada4093acb4f9c00e0e51a30344821d0785c77 go1.24.4.linux-riscv64.tar.gz
-0449f3203c39703ab27684be763e9bb78ca9a051e0e4176727aead9461b6deb5 go1.24.4.linux-s390x.tar.gz
-954b49ccc2cfcf4b5f7cd33ff662295e0d3b74e7590c8e25fc2abb30bce120ba go1.24.4.netbsd-386.tar.gz
-370fabcdfee7c18857c96fdd5b706e025d4fb86a208da88ba56b1493b35498e9 go1.24.4.netbsd-amd64.tar.gz
-7935ef95d4d1acc48587b1eb4acab98b0a7d9569736a32398b9c1d2e89026865 go1.24.4.netbsd-arm.tar.gz
-ead78fd0fa29fbb176cc83f1caa54032e1a44f842affa56a682c647e0759f237 go1.24.4.netbsd-arm64.tar.gz
-913e217394b851a636b99de175f0c2f9ab9938b41c557f047168f77ee485d776 go1.24.4.openbsd-386.tar.gz
-24568da3dcbcdb24ec18b631f072faf0f3763e3d04f79032dc56ad9ec35379c4 go1.24.4.openbsd-amd64.tar.gz
-45abf523f870632417ab007de3841f64dd906bde546ffc8c6380ccbe91c7fb73 go1.24.4.openbsd-arm.tar.gz
-7c57c69b5dd1e946b28a3034c285240a48e2861bdcb50b7d9c0ed61bcf89c879 go1.24.4.openbsd-arm64.tar.gz
-91ed711f704829372d6931e1897631ef40288b8f9e3cd6ef4a24df7126d1066a go1.24.4.openbsd-ppc64.tar.gz
-de5e270d971c8790e8880168d56a2ea103979927c10ded136d792bbdf9bce3d3 go1.24.4.openbsd-riscv64.tar.gz
-ff429d03f00bcd32a50f445320b8329d0fadb2a2fff899c11e95e0922a82c543 go1.24.4.plan9-386.tar.gz
-39d6363a43fd16b60ae9ad7346a264e982e4fa653dee3b45f83e03cd2f7a6647 go1.24.4.plan9-amd64.tar.gz
-1964ae2571259de77b930e97f2891aa92706ff81aac9909d45bb107b0fab16c8 go1.24.4.plan9-arm.tar.gz
-a7f9af424e8fb87886664754badca459513f64f6a321d17f1d219b8edf519821 go1.24.4.solaris-amd64.tar.gz
-d454d3cb144432f1726bf00e28c6017e78ccb256a8d01b8e3fb1b2e6b5650f28 go1.24.4.windows-386.zip
-966ecace1cdbb3497a2b930bdb0f90c3ad32922fa1a7c655b2d4bbeb7e4ac308 go1.24.4.windows-386.msi
-b751a1136cb9d8a2e7ebb22c538c4f02c09b98138c7c8bfb78a54a4566c013b1 go1.24.4.windows-amd64.zip
-0cbb6e83865747dbe69b3d4155f92e88fcf336ff5d70182dba145e9d7bd3d8f6 go1.24.4.windows-amd64.msi
-d17da51bc85bd010754a4063215d15d2c033cc289d67ca9201a03c9041b2969d go1.24.4.windows-arm64.zip
-47dbe734b6a829de45654648a7abcf05bdceef5c80e03ea0b208eeebef75a852 go1.24.4.windows-arm64.msi
+4bd01e91297207bfa450ea40d4d5a93b1b531a5e438473b2a06e18e077227225 go1.25.0.src.tar.gz
+e5234a7dac67bc86c528fe9752fc9d63557918627707a733ab4cac1a6faed2d4 go1.25.0.aix-ppc64.tar.gz
+5bd60e823037062c2307c71e8111809865116714d6f6b410597cf5075dfd80ef go1.25.0.darwin-amd64.tar.gz
+95e836238bcf8f9a71bffea43344cbd35ee1f16db3aaced2f98dbac045d102db go1.25.0.darwin-amd64.pkg
+544932844156d8172f7a28f77f2ac9c15a23046698b6243f633b0a0b00c0749c go1.25.0.darwin-arm64.tar.gz
+202a0d8338c152cb4c9f04782429e9ba8bef31d9889272380837e4043c9d800a go1.25.0.darwin-arm64.pkg
+5ed3cf9a810a1483822538674f1336c06b51aa1b94d6d545a1a0319a48177120 go1.25.0.dragonfly-amd64.tar.gz
+abea5d5c6697e6b5c224731f2158fe87c602996a2a233ac0c4730cd57bf8374e go1.25.0.freebsd-386.tar.gz
+86e6fe0a29698d7601c4442052dac48bd58d532c51cccb8f1917df648138730b go1.25.0.freebsd-amd64.tar.gz
+d90b78e41921f72f30e8bbc81d9dec2cff7ff384a33d8d8debb24053e4336bfe go1.25.0.freebsd-arm.tar.gz
+451d0da1affd886bfb291b7c63a6018527b269505db21ce6e14724f22ab0662e go1.25.0.freebsd-arm64.tar.gz
+7b565f76bd8bda46549eeaaefe0e53b251e644c230577290c0f66b1ecdb3cdbe go1.25.0.freebsd-riscv64.tar.gz
+b1e1fdaab1ad25aa1c08d7a36c97d45d74b98b89c3f78c6d2145f77face54a2c go1.25.0.illumos-amd64.tar.gz
+8c602dd9d99bc9453b3995d20ce4baf382cc50855900a0ece5de9929df4a993a go1.25.0.linux-386.tar.gz
+2852af0cb20a13139b3448992e69b868e50ed0f8a1e5940ee1de9e19a123b613 go1.25.0.linux-amd64.tar.gz
+05de75d6994a2783699815ee553bd5a9327d8b79991de36e38b66862782f54ae go1.25.0.linux-arm64.tar.gz
+a5a8f8198fcf00e1e485b8ecef9ee020778bf32a408a4e8873371bfce458cd09 go1.25.0.linux-armv6l.tar.gz
+cab86b1cf761b1cb3bac86a8877cfc92e7b036fc0d3084123d77013d61432afc go1.25.0.linux-loong64.tar.gz
+d66b6fb74c3d91b9829dc95ec10ca1f047ef5e89332152f92e136cf0e2da5be1 go1.25.0.linux-mips.tar.gz
+4082e4381a8661bc2a839ff94ba3daf4f6cde20f8fb771b5b3d4762dc84198a2 go1.25.0.linux-mips64.tar.gz
+70002c299ec7f7175ac2ef673b1b347eecfa54ae11f34416a6053c17f855afcc go1.25.0.linux-mips64le.tar.gz
+b00a3a39eff099f6df9f1c7355bf28e4589d0586f42d7d4a394efb763d145a73 go1.25.0.linux-mipsle.tar.gz
+df166f33bd98160662560a72ff0b4ba731f969a80f088922bddcf566a88c1ec1 go1.25.0.linux-ppc64.tar.gz
+0f18a89e7576cf2c5fa0b487a1635d9bcbf843df5f110e9982c64df52a983ad0 go1.25.0.linux-ppc64le.tar.gz
+c018ff74a2c48d55c8ca9b07c8e24163558ffec8bea08b326d6336905d956b67 go1.25.0.linux-riscv64.tar.gz
+34e5a2e19f2292fbaf8783e3a241e6e49689276aef6510a8060ea5ef54eee408 go1.25.0.linux-s390x.tar.gz
+f8586cdb7aa855657609a5c5f6dbf523efa00c2bbd7c76d3936bec80aa6c0aba go1.25.0.netbsd-386.tar.gz
+ae8dc1469385b86a157a423bb56304ba45730de8a897615874f57dd096db2c2a go1.25.0.netbsd-amd64.tar.gz
+1ff7e4cc764425fc9dd6825eaee79d02b3c7cafffbb3691687c8d672ade76cb7 go1.25.0.netbsd-arm.tar.gz
+e1b310739f26724216aa6d7d7208c4031f9ff54c9b5b9a796ddc8bebcb4a5f16 go1.25.0.netbsd-arm64.tar.gz
+4802a9b20e533da91adb84aab42e94aa56cfe3e5475d0550bed3385b182e69d8 go1.25.0.openbsd-386.tar.gz
+c016cd984bebe317b19a4f297c4f50def120dc9788490540c89f28e42f1dabe1 go1.25.0.openbsd-amd64.tar.gz
+a1e31d0bf22172ddde42edf5ec811ef81be43433df0948ece52fecb247ccfd8d go1.25.0.openbsd-arm.tar.gz
+343ea8edd8c218196e15a859c6072d0dd3246fbbb168481ab665eb4c4140458d go1.25.0.openbsd-arm64.tar.gz
+694c14da1bcaeb5e3332d49bdc2b6d155067648f8fe1540c5de8f3cf8e157154 go1.25.0.openbsd-ppc64.tar.gz
+aa510ad25cf54c06cd9c70b6d80ded69cb20188ac6e1735655eef29ff7e7885f go1.25.0.openbsd-riscv64.tar.gz
+46f8cef02086cf04bf186c5912776b56535178d4cb319cd19c9fdbdd29231986 go1.25.0.plan9-386.tar.gz
+29b34391d84095e44608a228f63f2f88113a37b74a79781353ec043dfbcb427b go1.25.0.plan9-amd64.tar.gz
+0a047107d13ebe7943aaa6d54b1d7bbd2e45e68ce449b52915a818da715799c2 go1.25.0.plan9-arm.tar.gz
+9977f9e4351984364a3b2b78f8b88bfd1d339812356d5237678514594b7d3611 go1.25.0.solaris-amd64.tar.gz
+df9f39db82a803af0db639e3613a36681ab7a42866b1384b3f3a1045663961a7 go1.25.0.windows-386.zip
+afd9e0a8d2665ff122c8302bb4a3ce4a5331e4e630ddc388be1f9238adfa8fe3 go1.25.0.windows-386.msi
+89efb4f9b30812eee083cc1770fdd2913c14d301064f6454851428f9707d190b go1.25.0.windows-amd64.zip
+936bd87109da515f79d80211de5bc6cbda071f2cc577f7e6af1a9e754ea34819 go1.25.0.windows-amd64.msi
+27bab004c72b3d7bd05a69b6ec0fc54a309b4b78cc569dd963d8b3ec28bfdb8c go1.25.0.windows-arm64.zip
+357d030b217ff68e700b6cfc56097bc21ad493bb45b79733a052d112f5031ed9 go1.25.0.windows-arm64.msi
# version:golangci 2.0.2
# https://github.com/golangci/golangci-lint/releases/
diff --git a/build/ci.go b/build/ci.go
index c18cc8c343..a91d511a32 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -124,6 +124,7 @@ var (
"jammy", // 22.04, EOL: 04/2032
"noble", // 24.04, EOL: 04/2034
"oracular", // 24.10, EOL: 07/2025
+ "plucky", // 25.04, EOL: 01/2026
}
// This is where the tests should be unpacked.
@@ -346,10 +347,6 @@ func downloadSpecTestFixtures(csdb *download.ChecksumDB, cachedir string) string
return filepath.Join(cachedir, base)
}
-// doCheckTidy assets that the Go modules files are tidied already.
-func doCheckTidy() {
-}
-
// doCheckGenerate ensures that re-generating generated files does not cause
// any mutations in the source file tree.
func doCheckGenerate() {
diff --git a/cmd/evm/blockrunner.go b/cmd/evm/blockrunner.go
index 31d1ba5ba1..f6538b1356 100644
--- a/cmd/evm/blockrunner.go
+++ b/cmd/evm/blockrunner.go
@@ -89,7 +89,7 @@ func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
continue
}
result := &testResult{Name: name, Pass: true}
- if err := tests[name].Run(false, rawdb.HashScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) {
+ if err := tests[name].Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) {
if ctx.Bool(DumpFlag.Name) {
if s, _ := chain.State(); s != nil {
result.State = dump(s)
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 112d1a539b..d0f4d6f81d 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -704,7 +704,7 @@ func pruneHistory(ctx *cli.Context) error {
return nil
}
-// downladEra is the era1 file downloader tool.
+// downloadEra is the era1 file downloader tool.
func downloadEra(ctx *cli.Context) error {
flags.CheckExclusive(ctx, eraBlockFlag, eraEpochFlag, eraAllFlag)
diff --git a/cmd/workload/filtertest.go b/cmd/workload/filtertest.go
index 11062122b8..9f0b6cab44 100644
--- a/cmd/workload/filtertest.go
+++ b/cmd/workload/filtertest.go
@@ -92,7 +92,7 @@ func (s *filterTestSuite) filterShortRange(t *utesting.T) {
}, s.queryAndCheck)
}
-// filterShortRange runs all long-range filter tests.
+// filterLongRange runs all long-range filter tests.
func (s *filterTestSuite) filterLongRange(t *utesting.T) {
s.filterRange(t, func(query *filterQuery) bool {
return query.ToBlock+1-query.FromBlock > filterRangeThreshold
diff --git a/cmd/workload/filtertestperf.go b/cmd/workload/filtertestperf.go
index c7d2fdd02a..d4f1a155f1 100644
--- a/cmd/workload/filtertestperf.go
+++ b/cmd/workload/filtertestperf.go
@@ -152,7 +152,7 @@ func (st *bucketStats) print(name string) {
name, st.count, float64(st.blocks)/float64(st.count), float64(st.logs)/float64(st.count), st.runtime/time.Duration(st.count))
}
-// writeQueries serializes the generated errors to the error file.
+// writeErrors serializes the generated errors to the error file.
func writeErrors(errorFile string, errors []*filterQuery) {
file, err := os.Create(errorFile)
if err != nil {
diff --git a/common/eta.go b/common/eta.go
new file mode 100644
index 0000000000..72c838f93d
--- /dev/null
+++ b/common/eta.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package common
+
+import "time"
+
+// CalculateETA calculates the estimated remaining time based on the
+// number of finished tasks, the number of remaining tasks, and the time spent on the finished tasks.
+func CalculateETA(done, left uint64, elapsed time.Duration) time.Duration {
+ if done == 0 || elapsed.Milliseconds() == 0 {
+ return 0
+ }
+
+ speed := float64(done) / float64(elapsed.Milliseconds())
+ return time.Duration(float64(left)/speed) * time.Millisecond
+}
diff --git a/common/eta_test.go b/common/eta_test.go
new file mode 100644
index 0000000000..b1dbb09e6c
--- /dev/null
+++ b/common/eta_test.go
@@ -0,0 +1,60 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package common
+
+import (
+ "testing"
+ "time"
+)
+
+func TestCalculateETA(t *testing.T) {
+ type args struct {
+ done uint64
+ left uint64
+ elapsed time.Duration
+ }
+ tests := []struct {
+ name string
+ args args
+ want time.Duration
+ }{
+ {
+ name: "zero done",
+ args: args{done: 0, left: 100, elapsed: time.Second},
+ want: 0,
+ },
+ {
+ name: "zero elapsed",
+ args: args{done: 1, left: 100, elapsed: 0},
+ want: 0,
+ },
+ {
+ name: "@Jolly23 's case",
+ args: args{done: 16858580, left: 41802252, elapsed: 66179848 * time.Millisecond},
+ want: 164098440 * time.Millisecond,
+ // wrong msg: msg="Indexing state history" processed=16858580 left=41802252 elapsed=18h22m59.848s eta=11h36m42.252s
+ // should be around 45.58 hours
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := CalculateETA(tt.args.done, tt.args.left, tt.args.elapsed); got != tt.want {
+ t.Errorf("CalculateETA() = %v ms, want %v ms", got.Milliseconds(), tt.want)
+ }
+ })
+ }
+}
diff --git a/common/hexutil/json.go b/common/hexutil/json.go
index e0ac98f52d..6b9f412078 100644
--- a/common/hexutil/json.go
+++ b/common/hexutil/json.go
@@ -28,11 +28,11 @@ import (
)
var (
- bytesT = reflect.TypeOf(Bytes(nil))
- bigT = reflect.TypeOf((*Big)(nil))
- uintT = reflect.TypeOf(Uint(0))
- uint64T = reflect.TypeOf(Uint64(0))
- u256T = reflect.TypeOf((*uint256.Int)(nil))
+ bytesT = reflect.TypeFor[Bytes]()
+ bigT = reflect.TypeFor[*Big]()
+ uintT = reflect.TypeFor[Uint]()
+ uint64T = reflect.TypeFor[Uint64]()
+ u256T = reflect.TypeFor[*uint256.Int]()
)
// Bytes marshals/unmarshals as a JSON string with 0x prefix.
diff --git a/common/types.go b/common/types.go
index fdb25f1b34..db4de8bcbd 100644
--- a/common/types.go
+++ b/common/types.go
@@ -42,8 +42,8 @@ const (
)
var (
- hashT = reflect.TypeOf(Hash{})
- addressT = reflect.TypeOf(Address{})
+ hashT = reflect.TypeFor[Hash]()
+ addressT = reflect.TypeFor[Address]()
// MaxAddress represents the maximum possible address value.
MaxAddress = HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")
@@ -466,7 +466,7 @@ func isString(input []byte) bool {
// UnmarshalJSON parses a hash in hex syntax.
func (d *Decimal) UnmarshalJSON(input []byte) error {
if !isString(input) {
- return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeOf(uint64(0))}
+ return &json.UnmarshalTypeError{Value: "non-string", Type: reflect.TypeFor[uint64]()}
}
if i, err := strconv.ParseUint(string(input[1:len(input)-1]), 10, 64); err == nil {
*d = Decimal(i)
diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index a8aee6f774..366cce89fd 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -125,7 +125,7 @@ func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *ty
// Check >0 TDs with pre-merge, --0 TDs with post-merge rules
if header.Difficulty.Sign() > 0 ||
// OP-Stack: transitioned networks must use legacy consensus pre-Bedrock
- cfg.IsOptimismBedrock(header.Number) {
+ cfg.IsOptimismPreBedrock(header.Number) {
return beacon.ethone.VerifyHeader(chain, header)
}
return beacon.verifyHeader(chain, header, parent)
@@ -410,6 +410,16 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
}
}
+ // Store DA footprint in BlobGasUsed header field if it hasn't already been set yet.
+ // Builder code may already calculate it during block building to avoid recalculating it here.
+ if chain.Config().IsDAFootprintBlockLimit(header.Time) && (header.BlobGasUsed == nil || *header.BlobGasUsed == 0) {
+ daFootprint, err := types.CalcDAFootprint(body.Transactions)
+ if err != nil {
+ return nil, fmt.Errorf("error calculating DA footprint: %w", err)
+ }
+ header.BlobGasUsed = &daFootprint
+ }
+
// Assemble the final block.
block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil), chain.Config())
diff --git a/consensus/misc/eip1559/eip1559.go b/consensus/misc/eip1559/eip1559.go
index 373d6f83ef..eb5dfed657 100644
--- a/consensus/misc/eip1559/eip1559.go
+++ b/consensus/misc/eip1559/eip1559.go
@@ -36,7 +36,7 @@ func VerifyEIP1559Header(config *params.ChainConfig, parent, header *types.Heade
if !config.IsLondon(parent.Number) {
parentGasLimit = parent.GasLimit * config.ElasticityMultiplier()
}
- if config.Optimism == nil { // gasLimit can adjust instantly in optimism
+ if !config.IsOptimism() { // OP Stack gasLimit can adjust instantly
if err := misc.VerifyGaslimit(parentGasLimit, header.GasLimit); err != nil {
return err
}
@@ -75,7 +75,7 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header, time uint64)
}
// OPStack addition: calculate the base fee using the upstream code.
- baseFee := calcBaseFeeInner(parent, elasticity, denominator)
+ baseFee := calcBaseFeeInner(config, parent, elasticity, denominator)
// OPStack addition: enforce minimum base fee.
// If the minimum base fee is 0, this has no effect.
@@ -89,10 +89,20 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header, time uint64)
return baseFee
}
-func calcBaseFeeInner(parent *types.Header, elasticity uint64, denominator uint64) *big.Int {
+func calcBaseFeeInner(config *params.ChainConfig, parent *types.Header, elasticity uint64, denominator uint64) *big.Int {
parentGasTarget := parent.GasLimit / elasticity
- // If the parent gasUsed is the same as the target, the baseFee remains unchanged.
- if parent.GasUsed == parentGasTarget {
+ parentGasMetered := parent.GasUsed
+ if config.IsDAFootprintBlockLimit(parent.Time) {
+ if parent.BlobGasUsed == nil {
+ panic("Jovian parent block has nil BlobGasUsed")
+ } else if *parent.BlobGasUsed > parent.GasUsed {
+ // Jovian updates the base fee based on the maximum of total transactions gas used and total DA footprint (which is
+ // stored in the BlobGasUsed field of the header).
+ parentGasMetered = *parent.BlobGasUsed
+ }
+ }
+ // If the parent gasMetered is the same as the target, the baseFee remains unchanged.
+ if parentGasMetered == parentGasTarget {
return new(big.Int).Set(parent.BaseFee)
}
@@ -101,10 +111,10 @@ func calcBaseFeeInner(parent *types.Header, elasticity uint64, denominator uint6
denom = new(big.Int)
)
- if parent.GasUsed > parentGasTarget {
+ if parentGasMetered > parentGasTarget {
// If the parent block used more gas than its target, the baseFee should increase.
// max(1, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
- num.SetUint64(parent.GasUsed - parentGasTarget)
+ num.SetUint64(parentGasMetered - parentGasTarget)
num.Mul(num, parent.BaseFee)
num.Div(num, denom.SetUint64(parentGasTarget))
num.Div(num, denom.SetUint64(denominator))
@@ -115,7 +125,7 @@ func calcBaseFeeInner(parent *types.Header, elasticity uint64, denominator uint6
} else {
// Otherwise if the parent block used less gas than its target, the baseFee should decrease.
// max(0, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
- num.SetUint64(parentGasTarget - parent.GasUsed)
+ num.SetUint64(parentGasTarget - parentGasMetered)
num.Mul(num, parent.BaseFee)
num.Div(num, denom.SetUint64(parentGasTarget))
num.Div(num, denom.SetUint64(denominator))
diff --git a/consensus/misc/eip1559/eip1559_optimism.go b/consensus/misc/eip1559/eip1559_optimism.go
index dd6a6d0d93..1c6f49cc22 100644
--- a/consensus/misc/eip1559/eip1559_optimism.go
+++ b/consensus/misc/eip1559/eip1559_optimism.go
@@ -8,18 +8,18 @@ import (
)
const HoloceneExtraDataVersionByte = uint8(0x00)
-const JovianExtraDataVersionByte = uint8(0x01)
+const MinBaseFeeExtraDataVersionByte = uint8(0x01)
type ForkChecker interface {
IsHolocene(time uint64) bool
- IsJovian(time uint64) bool
+ IsMinBaseFee(time uint64) bool
}
// ValidateOptimismExtraData validates the Optimism extra data.
// It uses the config and parent time to determine how to do the validation.
func ValidateOptimismExtraData(fc ForkChecker, time uint64, extraData []byte) error {
- if fc.IsJovian(time) {
- return ValidateJovianExtraData(extraData)
+ if fc.IsMinBaseFee(time) {
+ return ValidateMinBaseFeeExtraData(extraData)
} else if fc.IsHolocene(time) {
return ValidateHoloceneExtraData(extraData)
} else if len(extraData) > 0 { // pre-Holocene
@@ -32,8 +32,8 @@ func ValidateOptimismExtraData(fc ForkChecker, time uint64, extraData []byte) er
// It uses the config and parent time to determine how to do the decoding.
// The parent.extraData is expected to be valid (i.e. ValidateOptimismExtraData has been called previously)
func DecodeOptimismExtraData(fc ForkChecker, time uint64, extraData []byte) (uint64, uint64, *uint64) {
- if fc.IsJovian(time) {
- denominator, elasticity, minBaseFee := DecodeJovianExtraData(extraData)
+ if fc.IsMinBaseFee(time) {
+ denominator, elasticity, minBaseFee := DecodeMinBaseFeeExtraData(extraData)
return denominator, elasticity, minBaseFee
} else if fc.IsHolocene(time) {
denominator, elasticity := DecodeHoloceneExtraData(extraData)
@@ -45,11 +45,11 @@ func DecodeOptimismExtraData(fc ForkChecker, time uint64, extraData []byte) (uin
// EncodeOptimismExtraData encodes the Optimism extra data.
// It uses the config and parent time to determine how to do the encoding.
func EncodeOptimismExtraData(fc ForkChecker, time uint64, denominator, elasticity uint64, minBaseFee *uint64) []byte {
- if fc.IsJovian(time) {
+ if fc.IsMinBaseFee(time) {
if minBaseFee == nil {
- panic("minBaseFee cannot be nil for Jovian")
+ panic("minBaseFee cannot be nil since the MinBaseFee feature is enabled")
}
- return EncodeJovianExtraData(denominator, elasticity, *minBaseFee)
+ return EncodeMinBaseFeeExtraData(denominator, elasticity, *minBaseFee)
} else if fc.IsHolocene(time) {
return EncodeHoloceneExtraData(denominator, elasticity)
} else {
@@ -133,12 +133,12 @@ func ValidateHoloceneExtraData(extra []byte) error {
return ValidateHolocene1559Params(extra[1:])
}
-// DecodeJovianExtraData decodes the extraData parameters from the encoded form defined here:
+// DecodeMinBaseFeeExtraData decodes the extraData parameters from the encoded form defined here:
// https://specs.optimism.io/protocol/jovian/exec-engine.html
//
-// Returns 0,0,nil if the format is invalid, and d, e, nil for the Holocene length, to provide best effort behavior for non-Jovian extradata, though ValidateMinBaseFeeExtraData should be used instead of this function for
+// Returns 0,0,nil if the format is invalid, and d, e, nil for the Holocene length, to provide best effort behavior for non-MinBaseFee extradata, though ValidateMinBaseFeeExtraData should be used instead of this function for
// validity checking.
-func DecodeJovianExtraData(extra []byte) (uint64, uint64, *uint64) {
+func DecodeMinBaseFeeExtraData(extra []byte) (uint64, uint64, *uint64) {
// Best effort to decode the extraData for every block in the chain's history,
// including blocks before the minimum base fee feature was enabled.
if len(extra) == 9 {
@@ -154,27 +154,27 @@ func DecodeJovianExtraData(extra []byte) (uint64, uint64, *uint64) {
return 0, 0, nil
}
-// EncodeJovianExtraData encodes the EIP-1559 and minBaseFee parameters into the header 'ExtraData' format.
+// EncodeMinBaseFeeExtraData encodes the EIP-1559 and minBaseFee parameters into the header 'ExtraData' format.
// Will panic if EIP-1559 parameters are outside uint32 range.
-func EncodeJovianExtraData(denom, elasticity, minBaseFee uint64) []byte {
+func EncodeMinBaseFeeExtraData(denom, elasticity, minBaseFee uint64) []byte {
r := make([]byte, 17)
if denom > gomath.MaxUint32 || elasticity > gomath.MaxUint32 {
panic("eip-1559 parameters out of uint32 range")
}
- r[0] = JovianExtraDataVersionByte
+ r[0] = MinBaseFeeExtraDataVersionByte
binary.BigEndian.PutUint32(r[1:5], uint32(denom))
binary.BigEndian.PutUint32(r[5:9], uint32(elasticity))
binary.BigEndian.PutUint64(r[9:], minBaseFee)
return r
}
-// ValidateJovianExtraData checks if the header extraData is valid according to the minimum base fee feature.
-func ValidateJovianExtraData(extra []byte) error {
+// ValidateMinBaseFeeExtraData checks if the header extraData is valid according to the minimum base fee feature.
+func ValidateMinBaseFeeExtraData(extra []byte) error {
if len(extra) != 17 {
- return fmt.Errorf("jovian extraData should be 17 bytes, got %d", len(extra))
+ return fmt.Errorf("MinBaseFee extraData should be 17 bytes, got %d", len(extra))
}
- if extra[0] != JovianExtraDataVersionByte {
- return fmt.Errorf("jovian extraData version byte should be %d, got %d", JovianExtraDataVersionByte, extra[0])
+ if extra[0] != MinBaseFeeExtraDataVersionByte {
+ return fmt.Errorf("MinBaseFee extraData version byte should be %d, got %d", MinBaseFeeExtraDataVersionByte, extra[0])
}
return ValidateHolocene1559Params(extra[1:9])
}
diff --git a/consensus/misc/eip1559/eip1559_test.go b/consensus/misc/eip1559/eip1559_test.go
index b8139f963b..9e86341099 100644
--- a/consensus/misc/eip1559/eip1559_test.go
+++ b/consensus/misc/eip1559/eip1559_test.go
@@ -57,17 +57,19 @@ func config() *params.ChainConfig {
return config
}
-var TestCanyonTime = uint64(10)
-var TestHoloceneTime = uint64(12)
-var TestJovianTime = uint64(14)
+var (
+ testCanyonTime = uint64(10)
+ testHoloceneTime = uint64(12)
+ testJovianTime = uint64(14)
+)
func opConfig() *params.ChainConfig {
config := copyConfig(params.TestChainConfig)
config.LondonBlock = big.NewInt(5)
eip1559DenominatorCanyon := uint64(250)
- config.CanyonTime = &TestCanyonTime
- config.HoloceneTime = &TestHoloceneTime
- config.JovianTime = &TestJovianTime
+ config.CanyonTime = &testCanyonTime
+ config.HoloceneTime = &testHoloceneTime
+ config.JovianTime = &testJovianTime
config.Optimism = ¶ms.OptimismConfig{
EIP1559Elasticity: 6,
EIP1559Denominator: 50,
@@ -227,59 +229,74 @@ func TestCalcBaseFeeOptimismHolocene(t *testing.T) {
// TestCalcBaseFeeJovian tests that the minimum base fee is enforced
// when the computed base fee is less than the minimum base fee,
// if the feature is active and not enforced otherwise.
+// It also tests that the base fee update will take the DA footprint as stored
+// in the blob gas used field into account if it is larger than the gas used
+// field.
func TestCalcBaseFeeJovian(t *testing.T) {
parentGasLimit := uint64(30_000_000)
denom := uint64(50)
elasticity := uint64(3)
+ parentGasTarget := parentGasLimit / elasticity
+ const zeroParentBlobGasUsed = 0
- preJovian := TestJovianTime - 1
- postJovian := TestJovianTime
+ preJovian := testJovianTime - 1
+ postJovian := testJovianTime
tests := []struct {
- parentBaseFee int64
- parentGasUsed uint64
- parentTime uint64
- minBaseFee uint64
- expectedBaseFee uint64
+ parentBaseFee int64
+ parentGasUsed uint64
+ parentBlobGasUsed uint64
+ parentTime uint64
+ minBaseFee uint64
+ expectedBaseFee uint64
}{
// Test 0: gas used is below target, and the new calculated base fee is very low.
// But since we are pre Jovian, we don't enforce the minBaseFee.
- {1, parentGasLimit/elasticity - 1_000_000, preJovian, 1e9, 1},
+ {1, parentGasTarget - 1_000_000, zeroParentBlobGasUsed, preJovian, 1e9, 1},
// Test 1: gas used is exactly the target gas, but the base fee is set too low so
// the base fee is expected to be the minBaseFee
- {1, parentGasLimit / elasticity, postJovian, 1e9, 1e9},
+ {1, parentGasTarget, zeroParentBlobGasUsed, postJovian, 1e9, 1e9},
// Test 2: gas used exceeds gas target, but the new calculated base fee is still
// too low so the base fee is expected to be the minBaseFee
- {1, parentGasLimit/elasticity + 1_000_000, postJovian, 1e9, 1e9},
+ {1, parentGasTarget + 1_000_000, zeroParentBlobGasUsed, postJovian, 1e9, 1e9},
// Test 3: gas used exceeds gas target, but the new calculated base fee is higher
// than the minBaseFee, so don't enforce minBaseFee. See the calculation below:
// gasUsedDelta = gasUsed - parentGasTarget = 20_000_000 - 30_000_000 / 3 = 10_000_000
// 2e9 * 10_000_000 / 10_000_000 / 50 = 40_000_000
// 2e9 + 40_000_000 = 2_040_000_000, which is greater than minBaseFee
- {2e9, parentGasLimit/elasticity + 10_000_000, postJovian, 1e9, 2_040_000_000},
+ {2e9, parentGasTarget + 10_000_000, zeroParentBlobGasUsed, postJovian, 1e9, 2_040_000_000},
// Test 4: gas used is below target, but the new calculated base fee is still
// too low so the base fee is expected to be the minBaseFee
- {1, parentGasLimit/elasticity - 1_000_000, postJovian, 1e9, 1e9},
+ {1, parentGasTarget - 1_000_000, zeroParentBlobGasUsed, postJovian, 1e9, 1e9},
// Test 5: gas used is below target, and the new calculated base fee is higher
// than the minBaseFee, so don't enforce minBaseFee. See the calculation below:
// gasUsedDelta = gasUsed - parentGasTarget = 9_000_000 - 30_000_000 / 3 = -1_000_000
// 2_097_152 * -1_000_000 / 10_000_000 / 50 = -4194.304
// 2_097_152 - 4194.304 = 2_092_957.696, which is greater than minBaseFee
- {2_097_152, parentGasLimit/elasticity - 1_000_000, postJovian, 2e6, 2_092_958},
+ {2_097_152, parentGasTarget - 1_000_000, zeroParentBlobGasUsed, postJovian, 2e6, 2_092_958},
// Test 6: parent base fee already at minimum, below target => no change
- {1e4, parentGasLimit/elasticity - 1, postJovian, 1e4, 1e4},
+ {1e4, parentGasTarget - 1, zeroParentBlobGasUsed, postJovian, 1e4, 1e4},
// Test 7: parent base fee already at minimum, above target => small increase as usual
- {1e4, parentGasLimit/elasticity + 1, postJovian, 1e4, 1e4 + 1},
+ {1e4, parentGasTarget + 1, zeroParentBlobGasUsed, postJovian, 1e4, 1e4 + 1},
+
+ // Test 8: Pre-Jovian: parent base fee already at minimum, gas used at target, blob gas used at limit
+ // => no increase, minBaseFee ignored, high blob gas used ignored
+ {1e4, parentGasTarget, parentGasLimit, preJovian, 1e6, 1e4},
+ // Test 9: parent base fee already at minimum, gas used at target, da footprint above target => small increase
+ {1e4, parentGasTarget, parentGasTarget + 1, postJovian, 1e4, 1e4 + 1},
+ // Test 10: Test 3, but with high blob gas used instead of gas used
+ {2e9, parentGasTarget, parentGasTarget + 10_000_000, postJovian, 1e9, 2_040_000_000},
}
for i, test := range tests {
testName := fmt.Sprintf("test %d", i)
t.Run(testName, func(t *testing.T) {
parent := &types.Header{
- Number: common.Big32,
- GasLimit: parentGasLimit,
- GasUsed: test.parentGasUsed,
- BaseFee: big.NewInt(test.parentBaseFee),
- Time: test.parentTime,
+ Number: common.Big32,
+ GasLimit: parentGasLimit,
+ GasUsed: test.parentGasUsed,
+ BlobGasUsed: &test.parentBlobGasUsed,
+ BaseFee: big.NewInt(test.parentBaseFee),
+ Time: test.parentTime,
}
parent.Extra = EncodeOptimismExtraData(opConfig(), test.parentTime, denom, elasticity, &test.minBaseFee)
have, want := CalcBaseFee(opConfig(), parent, parent.Time+2), big.NewInt(int64(test.expectedBaseFee))
diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go
index c7c6138089..8fc07bee47 100644
--- a/consensus/misc/eip4844/eip4844.go
+++ b/consensus/misc/eip4844/eip4844.go
@@ -19,6 +19,7 @@ package eip4844
import (
"errors"
"fmt"
+ "math"
"math/big"
"github.com/ethereum/go-ethereum/core/types"
@@ -29,6 +30,66 @@ var (
minBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice)
)
+// BlobConfig contains the parameters for blob-related formulas.
+// These can be adjusted in a fork.
+type BlobConfig struct {
+ Target int
+ Max int
+ UpdateFraction uint64
+}
+
+func (bc *BlobConfig) maxBlobGas() uint64 {
+ return uint64(bc.Max) * params.BlobTxBlobGasPerBlob
+}
+
+// blobBaseFee computes the blob fee.
+func (bc *BlobConfig) blobBaseFee(excessBlobGas uint64) *big.Int {
+ return fakeExponential(minBlobGasPrice, new(big.Int).SetUint64(excessBlobGas), new(big.Int).SetUint64(bc.UpdateFraction))
+}
+
+// blobPrice returns the price of one blob in Wei.
+func (bc *BlobConfig) blobPrice(excessBlobGas uint64) *big.Int {
+ f := bc.blobBaseFee(excessBlobGas)
+ return new(big.Int).Mul(f, big.NewInt(params.BlobTxBlobGasPerBlob))
+}
+
+func latestBlobConfig(cfg *params.ChainConfig, time uint64) *BlobConfig {
+ if cfg.BlobScheduleConfig == nil {
+ return nil
+ }
+ var (
+ london = cfg.LondonBlock
+ s = cfg.BlobScheduleConfig
+ bc *params.BlobConfig
+ )
+ switch {
+ case cfg.IsBPO5(london, time) && s.BPO5 != nil:
+ bc = s.BPO5
+ case cfg.IsBPO4(london, time) && s.BPO4 != nil:
+ bc = s.BPO4
+ case cfg.IsBPO3(london, time) && s.BPO3 != nil:
+ bc = s.BPO3
+ case cfg.IsBPO2(london, time) && s.BPO2 != nil:
+ bc = s.BPO2
+ case cfg.IsBPO1(london, time) && s.BPO1 != nil:
+ bc = s.BPO1
+ case cfg.IsOsaka(london, time) && s.Osaka != nil:
+ bc = s.Osaka
+ case cfg.IsPrague(london, time) && s.Prague != nil:
+ bc = s.Prague
+ case cfg.IsCancun(london, time) && s.Cancun != nil:
+ bc = s.Cancun
+ default:
+ return nil
+ }
+
+ return &BlobConfig{
+ Target: bc.Target,
+ Max: bc.Max,
+ UpdateFraction: bc.UpdateFraction,
+ }
+}
+
// VerifyEIP4844Header verifies the presence of the excessBlobGas field and that
// if the current block contains no transactions, the excessBlobGas is updated
// accordingly.
@@ -36,21 +97,31 @@ func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Heade
if header.Number.Uint64() != parent.Number.Uint64()+1 {
panic("bad header pair")
}
- // Verify the header is not malformed
+
+ bcfg := latestBlobConfig(config, header.Time)
+ if bcfg == nil && !config.IsOptimism() {
+ panic("called before EIP-4844 is active")
+ }
+
if header.ExcessBlobGas == nil {
return errors.New("header is missing excessBlobGas")
}
if header.BlobGasUsed == nil {
return errors.New("header is missing blobGasUsed")
}
- // Verify that the blob gas used remains within reasonable limits.
- maxBlobGas := MaxBlobGasPerBlock(config, header.Time)
- if *header.BlobGasUsed > maxBlobGas {
- return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, maxBlobGas)
- }
- if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 {
- return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob)
+
+ // OP Stack sets a zero blobGasUsed pre-Jovian. Post-Jovian, it stores the DA footprint, which is
+ // probably not a multiple of [params.BlobTxBlobGasPerBlob].
+ if !config.IsOptimism() {
+ // Verify that the blob gas used remains within reasonable limits.
+ if *header.BlobGasUsed > bcfg.maxBlobGas() {
+ return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, bcfg.maxBlobGas())
+ }
+ if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 {
+ return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob)
+ }
}
+
// Verify the excessBlobGas is correct based on the parent header
expectedExcessBlobGas := CalcExcessBlobGas(config, parent, header.Time)
if *header.ExcessBlobGas != expectedExcessBlobGas {
@@ -62,38 +133,51 @@ func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Heade
// CalcExcessBlobGas calculates the excess blob gas after applying the set of
// blobs on top of the excess blob gas.
func CalcExcessBlobGas(config *params.ChainConfig, parent *types.Header, headTimestamp uint64) uint64 {
- var (
- parentExcessBlobGas uint64
- parentBlobGasUsed uint64
- )
+ // OP-Stack chains don't support blobs, but still set the excessBlobGas field (always to zero).
+ // So this function is called in many places for OP-Stack chains too. In order to not require
+ // a blob schedule in the chain config, we short circuit here.
+ if config.IsOptimism() {
+ if config.BlobScheduleConfig != nil {
+ panic("OP-Stack: CalcBlobFee: unexpected blob schedule or excess blob gas")
+ }
+ return 0
+ }
+
+ isOsaka := config.IsOsaka(config.LondonBlock, headTimestamp)
+ bcfg := latestBlobConfig(config, headTimestamp)
+ return calcExcessBlobGas(isOsaka, bcfg, parent)
+}
+
+func calcExcessBlobGas(isOsaka bool, bcfg *BlobConfig, parent *types.Header) uint64 {
+ var parentExcessBlobGas, parentBlobGasUsed uint64
if parent.ExcessBlobGas != nil {
parentExcessBlobGas = *parent.ExcessBlobGas
parentBlobGasUsed = *parent.BlobGasUsed
}
+
var (
excessBlobGas = parentExcessBlobGas + parentBlobGasUsed
- target = targetBlobsPerBlock(config, headTimestamp)
- targetGas = uint64(target) * params.BlobTxBlobGasPerBlob
+ targetGas = uint64(bcfg.Target) * params.BlobTxBlobGasPerBlob
)
if excessBlobGas < targetGas {
return 0
}
- if !config.IsOsaka(config.LondonBlock, headTimestamp) {
- // Pre-Osaka, we use the formula defined by EIP-4844.
- return excessBlobGas - targetGas
- }
- // EIP-7918 (post-Osaka) introduces a different formula for computing excess.
- var (
- baseCost = big.NewInt(params.BlobBaseCost)
- reservePrice = baseCost.Mul(baseCost, parent.BaseFee)
- blobPrice = calcBlobPrice(config, parent)
- )
- if reservePrice.Cmp(blobPrice) > 0 {
- max := MaxBlobsPerBlock(config, headTimestamp)
- scaledExcess := parentBlobGasUsed * uint64(max-target) / uint64(max)
- return parentExcessBlobGas + scaledExcess
+ // EIP-7918 (post-Osaka) introduces a different formula for computing excess,
+ // in cases where the price is lower than a 'reserve price'.
+ if isOsaka {
+ var (
+ baseCost = big.NewInt(params.BlobBaseCost)
+ reservePrice = baseCost.Mul(baseCost, parent.BaseFee)
+ blobPrice = bcfg.blobPrice(parentExcessBlobGas)
+ )
+ if reservePrice.Cmp(blobPrice) > 0 {
+ scaledExcess := parentBlobGasUsed * uint64(bcfg.Max-bcfg.Target) / uint64(bcfg.Max)
+ return parentExcessBlobGas + scaledExcess
+ }
}
+
+ // Original EIP-4844 formula.
return excessBlobGas - targetGas
}
@@ -113,7 +197,7 @@ func CalcBlobFee(config *params.ChainConfig, header *types.Header) *big.Int {
if blobConfig == nil {
panic("calculating blob fee on unsupported fork")
}
- return fakeExponential(minBlobGasPrice, new(big.Int).SetUint64(*header.ExcessBlobGas), new(big.Int).SetUint64(blobConfig.UpdateFraction))
+ return blobConfig.blobBaseFee(*header.ExcessBlobGas)
}
// MaxBlobsPerBlock returns the max blobs per block for a block at the given timestamp.
@@ -125,36 +209,6 @@ func MaxBlobsPerBlock(cfg *params.ChainConfig, time uint64) int {
return blobConfig.Max
}
-func latestBlobConfig(cfg *params.ChainConfig, time uint64) *params.BlobConfig {
- if cfg.BlobScheduleConfig == nil {
- return nil
- }
- var (
- london = cfg.LondonBlock
- s = cfg.BlobScheduleConfig
- )
- switch {
- case cfg.IsBPO5(london, time) && s.BPO5 != nil:
- return s.BPO5
- case cfg.IsBPO4(london, time) && s.BPO4 != nil:
- return s.BPO4
- case cfg.IsBPO3(london, time) && s.BPO3 != nil:
- return s.BPO3
- case cfg.IsBPO2(london, time) && s.BPO2 != nil:
- return s.BPO2
- case cfg.IsBPO1(london, time) && s.BPO1 != nil:
- return s.BPO1
- case cfg.IsOsaka(london, time) && s.Osaka != nil:
- return s.Osaka
- case cfg.IsPrague(london, time) && s.Prague != nil:
- return s.Prague
- case cfg.IsCancun(london, time) && s.Cancun != nil:
- return s.Cancun
- default:
- return nil
- }
-}
-
// MaxBlobGasPerBlock returns the maximum blob gas that can be spent in a block at the given timestamp.
func MaxBlobGasPerBlock(cfg *params.ChainConfig, time uint64) uint64 {
return uint64(MaxBlobsPerBlock(cfg, time)) * params.BlobTxBlobGasPerBlob
@@ -163,39 +217,11 @@ func MaxBlobGasPerBlock(cfg *params.ChainConfig, time uint64) uint64 {
// LatestMaxBlobsPerBlock returns the latest max blobs per block defined by the
// configuration, regardless of the currently active fork.
func LatestMaxBlobsPerBlock(cfg *params.ChainConfig) int {
- s := cfg.BlobScheduleConfig
- if s == nil {
- return 0
- }
- switch {
- case s.BPO5 != nil:
- return s.BPO5.Max
- case s.BPO4 != nil:
- return s.BPO4.Max
- case s.BPO3 != nil:
- return s.BPO3.Max
- case s.BPO2 != nil:
- return s.BPO2.Max
- case s.BPO1 != nil:
- return s.BPO1.Max
- case s.Osaka != nil:
- return s.Osaka.Max
- case s.Prague != nil:
- return s.Prague.Max
- case s.Cancun != nil:
- return s.Cancun.Max
- default:
- return 0
- }
-}
-
-// targetBlobsPerBlock returns the target number of blobs in a block at the given timestamp.
-func targetBlobsPerBlock(cfg *params.ChainConfig, time uint64) int {
- blobConfig := latestBlobConfig(cfg, time)
- if blobConfig == nil {
+ bcfg := latestBlobConfig(cfg, math.MaxUint64)
+ if bcfg == nil {
return 0
}
- return blobConfig.Target
+ return bcfg.Max
}
// fakeExponential approximates factor * e ** (numerator / denominator) using
diff --git a/consensus/misc/eip4844/eip4844_test.go b/consensus/misc/eip4844/eip4844_test.go
index 974c0001f3..9879f08d6e 100644
--- a/consensus/misc/eip4844/eip4844_test.go
+++ b/consensus/misc/eip4844/eip4844_test.go
@@ -30,9 +30,10 @@ import (
func TestCalcExcessBlobGas(t *testing.T) {
var (
config = params.MainnetChainConfig
- targetBlobs = targetBlobsPerBlock(config, *config.CancunTime)
+ targetBlobs = config.BlobScheduleConfig.Cancun.Target
targetBlobGas = uint64(targetBlobs) * params.BlobTxBlobGasPerBlob
)
+
var tests = []struct {
excess uint64
blobs int
@@ -114,6 +115,65 @@ func TestCalcBlobFeeOPStack(t *testing.T) {
reqPanic()
}
+func TestCalcBlobFeePostOsaka(t *testing.T) {
+ zero := uint64(0)
+ bpo1 := uint64(1754836608)
+ bpo2 := uint64(1754934912)
+ bpo3 := uint64(1755033216)
+
+ tests := []struct {
+ excessBlobGas uint64
+ blobGasUsed uint64
+ blobfee uint64
+ basefee uint64
+ parenttime uint64
+ headertime uint64
+ }{
+ {5149252, 1310720, 5617366, 30, 1754904516, 1754904528},
+ {19251039, 2490368, 20107103, 50, 1755033204, 1755033216},
+ }
+ for i, tt := range tests {
+ config := &params.ChainConfig{
+ LondonBlock: big.NewInt(0),
+ CancunTime: &zero,
+ PragueTime: &zero,
+ OsakaTime: &zero,
+ BPO1Time: &bpo1,
+ BPO2Time: &bpo2,
+ BPO3Time: &bpo3,
+ BlobScheduleConfig: &params.BlobScheduleConfig{
+ Cancun: params.DefaultCancunBlobConfig,
+ Prague: params.DefaultPragueBlobConfig,
+ Osaka: params.DefaultOsakaBlobConfig,
+ BPO1: &params.BlobConfig{
+ Target: 9,
+ Max: 14,
+ UpdateFraction: 8832827,
+ },
+ BPO2: &params.BlobConfig{
+ Target: 14,
+ Max: 21,
+ UpdateFraction: 13739630,
+ },
+ BPO3: &params.BlobConfig{
+ Target: 21,
+ Max: 32,
+ UpdateFraction: 20609697,
+ },
+ }}
+ parent := &types.Header{
+ ExcessBlobGas: &tt.excessBlobGas,
+ BlobGasUsed: &tt.blobGasUsed,
+ BaseFee: big.NewInt(int64(tt.basefee)),
+ Time: tt.parenttime,
+ }
+ have := CalcExcessBlobGas(config, parent, tt.headertime)
+ if have != tt.blobfee {
+ t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee)
+ }
+ }
+}
+
func TestFakeExponential(t *testing.T) {
tests := []struct {
factor int64
@@ -155,9 +215,10 @@ func TestFakeExponential(t *testing.T) {
func TestCalcExcessBlobGasEIP7918(t *testing.T) {
var (
cfg = params.MergedTestChainConfig
- targetBlobs = targetBlobsPerBlock(cfg, *cfg.CancunTime)
+ targetBlobs = cfg.BlobScheduleConfig.Osaka.Target
blobGasTarget = uint64(targetBlobs) * params.BlobTxBlobGasPerBlob
)
+
makeHeader := func(parentExcess, parentBaseFee uint64, blobsUsed int) *types.Header {
blobGasUsed := uint64(blobsUsed) * params.BlobTxBlobGasPerBlob
return &types.Header{
diff --git a/consensus/misc/gaslimit.go b/consensus/misc/gaslimit.go
index dfcabd9a80..9ae8c95f4b 100644
--- a/consensus/misc/gaslimit.go
+++ b/consensus/misc/gaslimit.go
@@ -32,7 +32,7 @@ func VerifyGaslimit(parentGasLimit, headerGasLimit uint64) error {
}
limit := parentGasLimit / params.GasLimitBoundDivisor
if uint64(diff) >= limit {
- return fmt.Errorf("invalid gas limit: have %d, want %d +-= %d", headerGasLimit, parentGasLimit, limit-1)
+ return fmt.Errorf("invalid gas limit: have %d, want %d +/- %d", headerGasLimit, parentGasLimit, limit-1)
}
if headerGasLimit < params.MinGasLimit {
return fmt.Errorf("invalid gas limit below %d", params.MinGasLimit)
diff --git a/core/block_validator.go b/core/block_validator.go
index 33987fa369..05c03235ed 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -106,7 +106,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
}
// Check blob gas usage.
- if header.BlobGasUsed != nil {
+ if !v.config.IsOptimism() && header.BlobGasUsed != nil {
if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated
return fmt.Errorf("blob gas used mismatch (header %v, calculated %v)", *header.BlobGasUsed, blobs*params.BlobTxBlobGasPerBlob)
}
@@ -116,6 +116,23 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
}
}
+ // OP Stack Jovian DA footprint block limit.
+ if v.config.IsDAFootprintBlockLimit(header.Time) {
+ if header.BlobGasUsed == nil {
+ return errors.New("nil blob gas used in post-Jovian block header, should store DA footprint")
+ }
+ blobGasUsed := *header.BlobGasUsed
+ daFootprint, err := types.CalcDAFootprint(block.Transactions())
+ if err != nil {
+ return fmt.Errorf("failed to calculate DA footprint: %w", err)
+ } else if blobGasUsed != daFootprint {
+ return fmt.Errorf("invalid DA footprint in blobGasUsed field (remote: %d local: %d)", blobGasUsed, daFootprint)
+ }
+ if daFootprint > block.GasLimit() {
+ return fmt.Errorf("DA footprint %d exceeds block gas limit %d", daFootprint, block.GasLimit())
+ }
+ }
+
// Ancestor block must be known.
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
diff --git a/core/blockchain.go b/core/blockchain.go
index 0e1c6cb180..365b864dc0 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -64,7 +64,6 @@ var (
headFastBlockGauge = metrics.NewRegisteredGauge("chain/head/receipt", nil)
headFinalizedBlockGauge = metrics.NewRegisteredGauge("chain/head/finalized", nil)
headSafeBlockGauge = metrics.NewRegisteredGauge("chain/head/safe", nil)
- headBaseFeeGauge = metrics.NewRegisteredGauge("chain/head/basefee", nil)
chainInfoGauge = metrics.NewRegisteredGaugeInfo("chain/info", nil)
chainMgaspsMeter = metrics.NewRegisteredResettingTimer("chain/mgasps", nil)
@@ -1230,7 +1229,9 @@ func (bc *BlockChain) writeHeadBlock(block *types.Block) {
bc.currentBlock.Store(block.Header())
headBlockGauge.Update(int64(block.NumberU64()))
- headBaseFeeGauge.TryUpdate(block.Header().BaseFee)
+
+ // OPStack addition
+ updateOptimismBlockMetrics(block.Header())
}
// stopWithoutSaving stops the blockchain service. If any imports are currently in progress
@@ -1398,7 +1399,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
bc.currentSnapBlock.Store(header)
headHeaderGauge.Update(header.Number.Int64())
headFastBlockGauge.Update(header.Number.Int64())
- headBaseFeeGauge.TryUpdate(header.BaseFee)
+
+ // OPStack addition
+ updateOptimismBlockMetrics(header)
return nil
}
// writeAncient writes blockchain and corresponding receipt chain into ancient store.
@@ -2016,7 +2019,10 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
// If we are past Byzantium, enable prefetching to pull in trie node paths
// while processing transactions. Before Byzantium the prefetcher is mostly
// useless due to the intermediate root hashing after each transaction.
- var witness *stateless.Witness
+ var (
+ witness *stateless.Witness
+ witnessStats *stateless.WitnessStats
+ )
if bc.chainConfig.IsByzantium(block.Number()) {
// Generate witnesses either if we're self-testing, or if it's the
// only block being inserted. A bit crude, but witnesses are huge,
@@ -2026,8 +2032,11 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
if err != nil {
return nil, err
}
+ if bc.cfg.VmConfig.EnableWitnessStats {
+ witnessStats = stateless.NewWitnessStats()
+ }
}
- statedb.StartPrefetcher("chain", witness)
+ statedb.StartPrefetcher("chain", witness, witnessStats)
defer statedb.StopPrefetcher()
}
@@ -2088,6 +2097,7 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
return nil, fmt.Errorf("stateless self-validation receipt root mismatch (cross: %x local: %x)", crossReceiptRoot, block.ReceiptHash())
}
}
+
xvtime := time.Since(xvstart)
proctime := time.Since(startTime) // processing + validation + cross validation
@@ -2123,6 +2133,11 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
if err != nil {
return nil, err
}
+ // Report the collected witness statistics
+ if witnessStats != nil {
+ witnessStats.ReportMetrics()
+ }
+
// Update the metrics touched during block commit
accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them
@@ -2759,7 +2774,9 @@ func (bc *BlockChain) InsertHeadersBeforeCutoff(headers []*types.Header) (int, e
bc.currentSnapBlock.Store(last)
headHeaderGauge.Update(last.Number.Int64())
headFastBlockGauge.Update(last.Number.Int64())
- headBaseFeeGauge.TryUpdate(last.BaseFee)
+
+ // OPStack addition
+ updateOptimismBlockMetrics(last)
return 0, nil
}
diff --git a/core/blockchain_optimism.go b/core/blockchain_optimism.go
new file mode 100644
index 0000000000..fa7ee47390
--- /dev/null
+++ b/core/blockchain_optimism.go
@@ -0,0 +1,27 @@
+package core
+
+import (
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+// OPStack additions
+var (
+ headBaseFeeGauge = metrics.NewRegisteredGauge("chain/head/basefee", nil)
+ headGasUsedGauge = metrics.NewRegisteredGauge("chain/head/gas_used", nil)
+ headBlobGasUsedGauge = metrics.NewRegisteredGauge("chain/head/blob_gas_used", nil)
+
+ headGasUsedHist = metrics.NewRegisteredHistogram("chain/head/gas_used_hist", nil, metrics.NewExpDecaySample(1028, 0.015))
+ headBlobGasUsedHist = metrics.NewRegisteredHistogram("chain/head/blob_gas_used_hist", nil, metrics.NewExpDecaySample(1028, 0.015))
+)
+
+func updateOptimismBlockMetrics(header *types.Header) error {
+ headBaseFeeGauge.TryUpdate(header.BaseFee)
+ headGasUsedGauge.Update(int64(header.GasUsed))
+ headBlobGasUsedGauge.TryUpdateUint64(header.BlobGasUsed)
+ headGasUsedHist.Update(int64(header.GasUsed))
+ if header.BlobGasUsed != nil {
+ headBlobGasUsedHist.Update(int64(*header.BlobGasUsed))
+ }
+ return nil
+}
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 974e40981c..63d2294a7f 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -552,8 +552,10 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
return block, b.receipts
}
+ sdb := state.NewDatabase(trdb, nil)
+
for i := 0; i < n; i++ {
- statedb, err := state.New(parent.Root(), state.NewDatabase(trdb, nil))
+ statedb, err := state.New(parent.Root(), sdb)
if err != nil {
panic(err)
}
diff --git a/core/filtermaps/chain_view.go b/core/filtermaps/chain_view.go
index 7c48048ad9..35c5ed22a5 100644
--- a/core/filtermaps/chain_view.go
+++ b/core/filtermaps/chain_view.go
@@ -124,19 +124,12 @@ func (cv *ChainView) RawReceipts(number uint64) types.Receipts {
// SharedRange returns the block range shared by two chain views.
func (cv *ChainView) SharedRange(cv2 *ChainView) common.Range[uint64] {
- cv.lock.Lock()
- defer cv.lock.Unlock()
-
- if cv == nil || cv2 == nil || !cv.extendNonCanonical() || !cv2.extendNonCanonical() {
+ if cv == nil || cv2 == nil {
return common.Range[uint64]{}
}
- var sharedLen uint64
- for n := min(cv.headNumber+1-uint64(len(cv.hashes)), cv2.headNumber+1-uint64(len(cv2.hashes))); n <= cv.headNumber && n <= cv2.headNumber; n++ {
- h1, h2 := cv.blockHash(n), cv2.blockHash(n)
- if h1 != h2 || h1 == (common.Hash{}) {
- break
- }
- sharedLen = n + 1
+ sharedLen := min(cv.headNumber, cv2.headNumber) + 1
+ for sharedLen > 0 && cv.BlockId(sharedLen-1) != cv2.BlockId(sharedLen-1) {
+ sharedLen--
}
return common.NewRange(0, sharedLen)
}
diff --git a/core/filtermaps/indexer.go b/core/filtermaps/indexer.go
index 3571f9f375..ca50fb466c 100644
--- a/core/filtermaps/indexer.go
+++ b/core/filtermaps/indexer.go
@@ -30,7 +30,7 @@ const (
headLogDelay = time.Second // head indexing log info delay (do not log if finished faster)
)
-// updateLoop initializes and updates the log index structure according to the
+// indexerLoop initializes and updates the log index structure according to the
// current targetView.
func (f *FilterMaps) indexerLoop() {
defer f.closeWg.Done()
@@ -221,7 +221,7 @@ func (f *FilterMaps) processSingleEvent(blocking bool) bool {
return true
}
-// setTargetView updates the target chain view of the iterator.
+// setTarget updates the target chain view of the iterator.
func (f *FilterMaps) setTarget(target targetUpdate) {
f.targetView = target.targetView
f.historyCutoff = target.historyCutoff
diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go
index af8326b52e..e3d96447ce 100644
--- a/core/forkid/forkid.go
+++ b/core/forkid/forkid.go
@@ -241,9 +241,8 @@ func checksumToBytes(hash uint32) [4]byte {
// them, one for the block number based forks and the second for the timestamps.
func gatherForks(config *params.ChainConfig, genesis uint64) ([]uint64, []uint64) {
// Gather all the fork block numbers via reflection
- kind := reflect.TypeOf(params.ChainConfig{})
+ kind := reflect.TypeFor[params.ChainConfig]()
conf := reflect.ValueOf(config).Elem()
- x := uint64(0)
var (
forksByBlock []uint64
forksByTime []uint64
@@ -258,12 +257,12 @@ func gatherForks(config *params.ChainConfig, genesis uint64) ([]uint64, []uint64
}
// Extract the fork rule block number or timestamp and aggregate it
- if field.Type == reflect.TypeOf(&x) {
+ if field.Type == reflect.TypeFor[*uint64]() {
if rule := conf.Field(i).Interface().(*uint64); rule != nil {
forksByTime = append(forksByTime, *rule)
}
}
- if field.Type == reflect.TypeOf(new(big.Int)) {
+ if field.Type == reflect.TypeFor[*big.Int]() {
if rule := conf.Field(i).Interface().(*big.Int); rule != nil {
forksByBlock = append(forksByBlock, rule.Uint64())
}
diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go
index 413e4d77a8..c78ff23cd6 100644
--- a/core/forkid/forkid_test.go
+++ b/core/forkid/forkid_test.go
@@ -76,10 +76,16 @@ func TestCreation(t *testing.T) {
{20000000, 1681338454, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last Gray Glacier block
{20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // First Shanghai block
{30000000, 1710338134, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // Last Shanghai block
- {40000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // First Cancun block
+ {30000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // First Cancun block
{30000000, 1746022486, ID{Hash: checksumToBytes(0x9f3d2254), Next: 1746612311}}, // Last Cancun block
- {30000000, 1746612311, ID{Hash: checksumToBytes(0xc376cf8b), Next: 0}}, // First Prague block
- {50000000, 2000000000, ID{Hash: checksumToBytes(0xc376cf8b), Next: 0}}, // Future Prague block
+ {30000000, 1746612311, ID{Hash: checksumToBytes(0xc376cf8b), Next: 1764798551}}, // First Prague block
+ {30000000, 1764798550, ID{Hash: checksumToBytes(0xc376cf8b), Next: 1764798551}}, // Last Prague block
+ {30000000, 1764798551, ID{Hash: checksumToBytes(0x5167e2a6), Next: 1765290071}}, // First Osaka block
+ {30000000, 1765290070, ID{Hash: checksumToBytes(0x5167e2a6), Next: 1765290071}}, // Last Osaka block
+ {30000000, 1765290071, ID{Hash: checksumToBytes(0xcba2a1c0), Next: 1767747671}}, // First BPO1 block
+ {30000000, 1767747670, ID{Hash: checksumToBytes(0xcba2a1c0), Next: 1767747671}}, // Last BPO1 block
+ {30000000, 1767747671, ID{Hash: checksumToBytes(0x07c9462e), Next: 0}}, // First BPO2 block
+ {50000000, 2000000000, ID{Hash: checksumToBytes(0x07c9462e), Next: 0}}, // Future BPO2 block
},
},
// Sepolia test cases
@@ -95,8 +101,14 @@ func TestCreation(t *testing.T) {
{1735372, 1706655071, ID{Hash: checksumToBytes(0xf7f9bc08), Next: 1706655072}}, // Last Shanghai block
{1735372, 1706655072, ID{Hash: checksumToBytes(0x88cf81d9), Next: 1741159776}}, // First Cancun block
{1735372, 1741159775, ID{Hash: checksumToBytes(0x88cf81d9), Next: 1741159776}}, // Last Cancun block
- {1735372, 1741159776, ID{Hash: checksumToBytes(0xed88b5fd), Next: 0}}, // First Prague block
- {1735372, 2741159776, ID{Hash: checksumToBytes(0xed88b5fd), Next: 0}}, // Future Prague block
+ {1735372, 1741159776, ID{Hash: checksumToBytes(0xed88b5fd), Next: 1760427360}}, // First Prague block
+ {1735372, 1760427359, ID{Hash: checksumToBytes(0xed88b5fd), Next: 1760427360}}, // Last Prague block
+ {1735372, 1760427360, ID{Hash: checksumToBytes(0xe2ae4999), Next: 1761017184}}, // First Osaka block
+ {1735372, 1761017183, ID{Hash: checksumToBytes(0xe2ae4999), Next: 1761017184}}, // Last Osaka block
+ {1735372, 1761017184, ID{Hash: checksumToBytes(0x56078a1e), Next: 1761607008}}, // First BPO1 block
+ {1735372, 1761607007, ID{Hash: checksumToBytes(0x56078a1e), Next: 1761607008}}, // Last BPO1 block
+ {1735372, 1761607008, ID{Hash: checksumToBytes(0x268956b6), Next: 0}}, // First BPO2 block
+ {1735372, 2000000000, ID{Hash: checksumToBytes(0x268956b6), Next: 0}}, // Future BPO2 block
},
},
// Holesky test cases
@@ -110,8 +122,14 @@ func TestCreation(t *testing.T) {
{123, 1707305663, ID{Hash: checksumToBytes(0xfd4f016b), Next: 1707305664}}, // Last Shanghai block
{123, 1707305664, ID{Hash: checksumToBytes(0x9b192ad0), Next: 1740434112}}, // First Cancun block
{123, 1740434111, ID{Hash: checksumToBytes(0x9b192ad0), Next: 1740434112}}, // Last Cancun block
- {123, 1740434112, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 0}}, // First Prague block
- {123, 2740434112, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 0}}, // Future Prague block
+ {123, 1740434112, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 1759308480}}, // First Prague block
+ {123, 1759308479, ID{Hash: checksumToBytes(0xdfbd9bed), Next: 1759308480}}, // Last Prague block
+ {123, 1759308480, ID{Hash: checksumToBytes(0x783def52), Next: 1759800000}}, // First Osaka block
+ {123, 1759799999, ID{Hash: checksumToBytes(0x783def52), Next: 1759800000}}, // Last Osaka block
+ {123, 1759800000, ID{Hash: checksumToBytes(0xa280a45c), Next: 1760389824}}, // First BPO1 block
+ {123, 1760389823, ID{Hash: checksumToBytes(0xa280a45c), Next: 1760389824}}, // Last BPO1 block
+ {123, 1760389824, ID{Hash: checksumToBytes(0x9bc6cb31), Next: 0}}, // First BPO2 block
+ {123, 2000000000, ID{Hash: checksumToBytes(0x9bc6cb31), Next: 0}}, // Future BPO1 block
},
},
// Hoodi test cases
@@ -121,8 +139,14 @@ func TestCreation(t *testing.T) {
[]testcase{
{0, 0, ID{Hash: checksumToBytes(0xbef71d30), Next: 1742999832}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin, London, Paris, Shanghai, Cancun block
{123, 1742999831, ID{Hash: checksumToBytes(0xbef71d30), Next: 1742999832}}, // Last Cancun block
- {123, 1742999832, ID{Hash: checksumToBytes(0x0929e24e), Next: 0}}, // First Prague block
- {123, 2740434112, ID{Hash: checksumToBytes(0x0929e24e), Next: 0}}, // Future Prague block
+ {123, 1742999832, ID{Hash: checksumToBytes(0x0929e24e), Next: 1761677592}}, // First Prague block
+ {123, 1761677591, ID{Hash: checksumToBytes(0x0929e24e), Next: 1761677592}}, // Last Prague block
+ {123, 1761677592, ID{Hash: checksumToBytes(0xe7e0e7ff), Next: 1762365720}}, // First Osaka block
+ {123, 1762365719, ID{Hash: checksumToBytes(0xe7e0e7ff), Next: 1762365720}}, // Last Osaka block
+ {123, 1762365720, ID{Hash: checksumToBytes(0x3893353e), Next: 1762955544}}, // First BPO1 block
+ {123, 1762955543, ID{Hash: checksumToBytes(0x3893353e), Next: 1762955544}}, // Last BPO1 block
+ {123, 1762955544, ID{Hash: checksumToBytes(0x23aa1351), Next: 0}}, // First BPO2 block
+ {123, 2000000000, ID{Hash: checksumToBytes(0x23aa1351), Next: 0}}, // Future BPO2 block
},
},
}
@@ -144,6 +168,9 @@ func TestValidation(t *testing.T) {
legacyConfig.ShanghaiTime = nil
legacyConfig.CancunTime = nil
legacyConfig.PragueTime = nil
+ legacyConfig.OsakaTime = nil
+ legacyConfig.BPO1Time = nil
+ legacyConfig.BPO2Time = nil
tests := []struct {
config *params.ChainConfig
@@ -343,11 +370,11 @@ func TestValidation(t *testing.T) {
// Local is mainnet Shanghai, remote is random Shanghai.
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
- // Local is mainnet Prague, far in the future. Remote announces Gopherium (non existing fork)
+ // Local is mainnet BPO2, far in the future. Remote announces Gopherium (non existing fork)
// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
- {params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0xc376cf8b), Next: 8888888888}, ErrLocalIncompatibleOrStale},
+ {params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0x07c9462e), Next: 8888888888}, ErrLocalIncompatibleOrStale},
// Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing
// fork) at timestamp 1668000000, before Cancun. Local is incompatible.
diff --git a/core/headerchain.go b/core/headerchain.go
index 4174aadef1..50b686bd8b 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -92,7 +92,9 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
}
hc.currentHeaderHash = hc.CurrentHeader().Hash()
headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())
- headBaseFeeGauge.TryUpdate(hc.CurrentHeader().BaseFee)
+
+ // OPStack addition
+ updateOptimismBlockMetrics(hc.CurrentHeader())
return hc, nil
}
@@ -183,7 +185,9 @@ func (hc *HeaderChain) Reorg(headers []*types.Header) error {
hc.currentHeaderHash = last.Hash()
hc.currentHeader.Store(types.CopyHeader(last))
headHeaderGauge.Update(last.Number.Int64())
- headBaseFeeGauge.TryUpdate(last.BaseFee)
+
+ // OPStack addition
+ updateOptimismBlockMetrics(last)
return nil
}
@@ -486,7 +490,9 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
hc.currentHeader.Store(head)
hc.currentHeaderHash = head.Hash()
headHeaderGauge.Update(head.Number.Int64())
- headBaseFeeGauge.TryUpdate(head.BaseFee)
+
+ // OPStack addition
+ updateOptimismBlockMetrics(head)
}
type (
@@ -575,6 +581,9 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
headHeaderGauge.Update(parent.Number.Int64())
headBaseFeeGauge.TryUpdate(parent.BaseFee)
+ // OPStack addition
+ updateOptimismBlockMetrics(parent)
+
// If this is the first iteration, wipe any leftover data upwards too so
// we don't end up with dangling daps in the database
var nums []uint64
diff --git a/core/overlay/state_transition.go b/core/overlay/state_transition.go
new file mode 100644
index 0000000000..90b5c9431a
--- /dev/null
+++ b/core/overlay/state_transition.go
@@ -0,0 +1,105 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package overlay
+
+import (
+ "bytes"
+ "encoding/gob"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// TransitionState is a structure that holds the progress markers of the
+// translation process.
+type TransitionState struct {
+ CurrentAccountAddress *common.Address // address of the last translated account
+ CurrentSlotHash common.Hash // hash of the last translated storage slot
+ CurrentPreimageOffset int64 // next byte to read from the preimage file
+ Started, Ended bool
+
+ // Mark whether the storage for an account has been processed. This is useful if the
+ // maximum number of leaves of the conversion is reached before the whole storage is
+ // processed.
+ StorageProcessed bool
+
+ BaseRoot common.Hash // hash of the last read-only MPT base tree
+}
+
+// InTransition returns true if the translation process is in progress.
+func (ts *TransitionState) InTransition() bool {
+ return ts != nil && ts.Started && !ts.Ended
+}
+
+// Transitioned returns true if the translation process has been completed.
+func (ts *TransitionState) Transitioned() bool {
+ return ts != nil && ts.Ended
+}
+
+// Copy returns a deep copy of the TransitionState object.
+func (ts *TransitionState) Copy() *TransitionState {
+ ret := &TransitionState{
+ Started: ts.Started,
+ Ended: ts.Ended,
+ CurrentSlotHash: ts.CurrentSlotHash,
+ CurrentPreimageOffset: ts.CurrentPreimageOffset,
+ StorageProcessed: ts.StorageProcessed,
+ }
+ if ts.CurrentAccountAddress != nil {
+ addr := *ts.CurrentAccountAddress
+ ret.CurrentAccountAddress = &addr
+ }
+ return ret
+}
+
+// LoadTransitionState retrieves the Verkle transition state associated with
+// the given state root hash from the database.
+func LoadTransitionState(db ethdb.KeyValueReader, root common.Hash, isVerkle bool) *TransitionState {
+ var ts *TransitionState
+
+ data, _ := rawdb.ReadVerkleTransitionState(db, root)
+
+ // if a state could be read from the db, attempt to decode it
+ if len(data) > 0 {
+ var (
+ newts TransitionState
+ buf = bytes.NewBuffer(data[:])
+ dec = gob.NewDecoder(buf)
+ )
+ // Decode transition state
+ err := dec.Decode(&newts)
+ if err != nil {
+ log.Error("failed to decode transition state", "err", err)
+ return nil
+ }
+ ts = &newts
+ }
+
+ // Fallback that should only happen before the transition
+ if ts == nil {
+ // Initialize the first transition state, with the "ended"
+ // field set to true if the database was created
+ // as a verkle database.
+ log.Debug("no transition state found, starting fresh", "is verkle", db)
+
+ // Start with a fresh state
+ ts = &TransitionState{Ended: isVerkle}
+ }
+ return ts
+}
diff --git a/rlp/safe.go b/core/rawdb/accessors_overlay.go
similarity index 63%
rename from rlp/safe.go
rename to core/rawdb/accessors_overlay.go
index 3c910337b6..364cc889d1 100644
--- a/rlp/safe.go
+++ b/core/rawdb/accessors_overlay.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The go-ethereum Authors
+// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -14,14 +14,17 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-//go:build nacl || js || !cgo
-// +build nacl js !cgo
+package rawdb
-package rlp
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
-import "reflect"
+func ReadVerkleTransitionState(db ethdb.KeyValueReader, hash common.Hash) ([]byte, error) {
+ return db.Get(transitionStateKey(hash))
+}
-// byteArrayBytes returns a slice of the byte array v.
-func byteArrayBytes(v reflect.Value, length int) []byte {
- return v.Slice(0, length).Bytes()
+func WriteVerkleTransitionState(db ethdb.KeyValueWriter, hash common.Hash, state []byte) error {
+ return db.Put(transitionStateKey(hash), state)
}
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 44f041d82e..2359fb18f1 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -119,13 +119,6 @@ func WriteStateID(db ethdb.KeyValueWriter, root common.Hash, id uint64) {
}
}
-// DeleteStateID deletes the specified state lookup from the database.
-func DeleteStateID(db ethdb.KeyValueWriter, root common.Hash) {
- if err := db.Delete(stateIDKey(root)); err != nil {
- log.Crit("Failed to delete state ID", "err", err)
- }
-}
-
// ReadPersistentStateID retrieves the id of the persistent state from the database.
func ReadPersistentStateID(db ethdb.KeyValueReader) uint64 {
data, _ := db.Get(persistentStateIDKey)
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index a18f94b9a5..948f6e339a 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -18,13 +18,17 @@ package rawdb
import (
"bytes"
+ "context"
"errors"
"fmt"
"maps"
"os"
"path/filepath"
+ "runtime"
"slices"
"strings"
+ "sync"
+ "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -32,7 +36,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/log"
- "github.com/olekukonko/tablewriter"
+ "golang.org/x/sync/errgroup"
)
var ErrDeleteRangeInterrupted = errors.New("safe delete range operation interrupted")
@@ -216,7 +220,7 @@ type OpenOptions struct {
// Open creates a high-level database wrapper for the given key-value store.
func Open(db ethdb.KeyValueStore, opts OpenOptions) (ethdb.Database, error) {
- // Create the idle freezer instanceproto. If the given ancient directory is empty,
+ // Create the idle freezer instance. If the given ancient directory is empty,
// in-memory chain freezer is used (e.g. dev mode); otherwise the regular
// file-based freezer is created.
chainFreezerDir := opts.Ancient
@@ -363,36 +367,36 @@ func (c counter) Percentage(current uint64) string {
return fmt.Sprintf("%d", current*100/uint64(c))
}
-// stat stores sizes and count for a parameter
+// stat provides lock-free statistics aggregation using atomic operations
type stat struct {
- size common.StorageSize
- count counter
+ size uint64
+ count uint64
}
-// Add size to the stat and increase the counter by 1
-func (s *stat) Add(size common.StorageSize) {
- s.size += size
- s.count++
+func (s *stat) empty() bool {
+ return atomic.LoadUint64(&s.count) == 0
}
-func (s *stat) Size() string {
- return s.size.String()
+func (s *stat) add(size common.StorageSize) {
+ atomic.AddUint64(&s.size, uint64(size))
+ atomic.AddUint64(&s.count, 1)
}
-func (s *stat) Count() string {
- return s.count.String()
+func (s *stat) sizeString() string {
+ return common.StorageSize(atomic.LoadUint64(&s.size)).String()
+}
+
+func (s *stat) countString() string {
+ return counter(atomic.LoadUint64(&s.count)).String()
}
// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
- it := db.NewIterator(keyPrefix, keyStart)
- defer it.Release()
-
var (
- count int64
- start = time.Now()
- logged = time.Now()
+ start = time.Now()
+ count atomic.Int64
+ total atomic.Uint64
// Key-value store statistics
headers stat
@@ -428,144 +432,200 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
metadata stat
unaccounted stat
- // Totals
- total common.StorageSize
-
// This map tracks example keys for unaccounted data.
// For each unique two-byte prefix, the first unaccounted key encountered
// by the iterator will be stored.
unaccountedKeys = make(map[[2]byte][]byte)
+ unaccountedMu sync.Mutex
)
- // Inspect key-value database first.
- for it.Next() {
- var (
- key = it.Key()
- size = common.StorageSize(len(key) + len(it.Value()))
- )
- total += size
- switch {
- case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
- headers.Add(size)
- case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
- bodies.Add(size)
- case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
- receipts.Add(size)
- case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
- tds.Add(size)
- case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
- numHashPairings.Add(size)
- case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
- hashNumPairings.Add(size)
- case IsLegacyTrieNode(key, it.Value()):
- legacyTries.Add(size)
- case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
- stateLookups.Add(size)
- case IsAccountTrieNode(key):
- accountTries.Add(size)
- case IsStorageTrieNode(key):
- storageTries.Add(size)
- case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
- codes.Add(size)
- case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
- txLookups.Add(size)
- case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
- accountSnaps.Add(size)
- case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
- storageSnaps.Add(size)
- case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
- preimages.Add(size)
- case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
- metadata.Add(size)
- case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
- metadata.Add(size)
- case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
- beaconHeaders.Add(size)
- case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
- cliqueSnaps.Add(size)
-
- // new log index
- case bytes.HasPrefix(key, filterMapRowPrefix) && len(key) <= len(filterMapRowPrefix)+9:
- filterMapRows.Add(size)
- case bytes.HasPrefix(key, filterMapLastBlockPrefix) && len(key) == len(filterMapLastBlockPrefix)+4:
- filterMapLastBlock.Add(size)
- case bytes.HasPrefix(key, filterMapBlockLVPrefix) && len(key) == len(filterMapBlockLVPrefix)+8:
- filterMapBlockLV.Add(size)
-
- // old log index (deprecated)
- case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
- bloomBits.Add(size)
- case bytes.HasPrefix(key, bloomBitsMetaPrefix) && len(key) < len(bloomBitsMetaPrefix)+8:
- bloomBits.Add(size)
-
- // Path-based historic state indexes
- case bytes.HasPrefix(key, StateHistoryIndexPrefix) && len(key) >= len(StateHistoryIndexPrefix)+common.HashLength:
- stateIndex.Add(size)
-
- // Verkle trie data is detected, determine the sub-category
- case bytes.HasPrefix(key, VerklePrefix):
- remain := key[len(VerklePrefix):]
+
+ inspectRange := func(ctx context.Context, r byte) error {
+ var s []byte
+ if len(keyStart) > 0 {
switch {
- case IsAccountTrieNode(remain):
- verkleTries.Add(size)
- case bytes.HasPrefix(remain, stateIDPrefix) && len(remain) == len(stateIDPrefix)+common.HashLength:
- verkleStateLookups.Add(size)
- case bytes.Equal(remain, persistentStateIDKey):
- metadata.Add(size)
- case bytes.Equal(remain, trieJournalKey):
- metadata.Add(size)
- case bytes.Equal(remain, snapSyncStatusFlagKey):
- metadata.Add(size)
+ case r < keyStart[0]:
+ return nil
+ case r == keyStart[0]:
+ s = keyStart[1:]
default:
- unaccounted.Add(size)
+ // r > keyStart[0]: the entire sub-range lies after keyStart, so it is inspected in full
}
+ }
+ it := db.NewIterator(append(keyPrefix, r), s)
+ defer it.Release()
- // Metadata keys
- case slices.ContainsFunc(knownMetadataKeys, func(x []byte) bool { return bytes.Equal(x, key) }):
- metadata.Add(size)
+ for it.Next() {
+ var (
+ key = it.Key()
+ size = common.StorageSize(len(key) + len(it.Value()))
+ )
+ total.Add(uint64(size))
+ count.Add(1)
- default:
- unaccounted.Add(size)
- if len(key) >= 2 {
- prefix := [2]byte(key[:2])
- if _, ok := unaccountedKeys[prefix]; !ok {
- unaccountedKeys[prefix] = bytes.Clone(key)
+ switch {
+ case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
+ headers.add(size)
+ case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
+ bodies.add(size)
+ case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
+ receipts.add(size)
+ case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
+ tds.add(size)
+ case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
+ numHashPairings.add(size)
+ case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
+ hashNumPairings.add(size)
+ case IsLegacyTrieNode(key, it.Value()):
+ legacyTries.add(size)
+ case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
+ stateLookups.add(size)
+ case IsAccountTrieNode(key):
+ accountTries.add(size)
+ case IsStorageTrieNode(key):
+ storageTries.add(size)
+ case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
+ codes.add(size)
+ case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
+ txLookups.add(size)
+ case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
+ accountSnaps.add(size)
+ case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
+ storageSnaps.add(size)
+ case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
+ preimages.add(size)
+ case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
+ metadata.add(size)
+ case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
+ metadata.add(size)
+ case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
+ beaconHeaders.add(size)
+ case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
+ cliqueSnaps.add(size)
+
+ // new log index
+ case bytes.HasPrefix(key, filterMapRowPrefix) && len(key) <= len(filterMapRowPrefix)+9:
+ filterMapRows.add(size)
+ case bytes.HasPrefix(key, filterMapLastBlockPrefix) && len(key) == len(filterMapLastBlockPrefix)+4:
+ filterMapLastBlock.add(size)
+ case bytes.HasPrefix(key, filterMapBlockLVPrefix) && len(key) == len(filterMapBlockLVPrefix)+8:
+ filterMapBlockLV.add(size)
+
+ // old log index (deprecated)
+ case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
+ bloomBits.add(size)
+ case bytes.HasPrefix(key, bloomBitsMetaPrefix) && len(key) < len(bloomBitsMetaPrefix)+8:
+ bloomBits.add(size)
+
+ // Path-based historic state indexes
+ case bytes.HasPrefix(key, StateHistoryIndexPrefix) && len(key) >= len(StateHistoryIndexPrefix)+common.HashLength:
+ stateIndex.add(size)
+
+ // Verkle trie data is detected, determine the sub-category
+ case bytes.HasPrefix(key, VerklePrefix):
+ remain := key[len(VerklePrefix):]
+ switch {
+ case IsAccountTrieNode(remain):
+ verkleTries.add(size)
+ case bytes.HasPrefix(remain, stateIDPrefix) && len(remain) == len(stateIDPrefix)+common.HashLength:
+ verkleStateLookups.add(size)
+ case bytes.Equal(remain, persistentStateIDKey):
+ metadata.add(size)
+ case bytes.Equal(remain, trieJournalKey):
+ metadata.add(size)
+ case bytes.Equal(remain, snapSyncStatusFlagKey):
+ metadata.add(size)
+ default:
+ unaccounted.add(size)
+ }
+
+ // Metadata keys
+ case slices.ContainsFunc(knownMetadataKeys, func(x []byte) bool { return bytes.Equal(x, key) }):
+ metadata.add(size)
+
+ default:
+ unaccounted.add(size)
+ if len(key) >= 2 {
+ prefix := [2]byte(key[:2])
+ unaccountedMu.Lock()
+ if _, ok := unaccountedKeys[prefix]; !ok {
+ unaccountedKeys[prefix] = bytes.Clone(key)
+ }
+ unaccountedMu.Unlock()
}
}
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
}
- count++
- if count%1000 == 0 && time.Since(logged) > 8*time.Second {
- log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
- logged = time.Now()
+
+ return it.Error()
+ }
+
+ var (
+ eg, ctx = errgroup.WithContext(context.Background())
+ workers = runtime.NumCPU()
+ )
+ eg.SetLimit(workers)
+
+ // Progress reporter
+ done := make(chan struct{})
+ go func() {
+ ticker := time.NewTicker(8 * time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ log.Info("Inspecting database", "count", count.Load(), "size", common.StorageSize(total.Load()), "elapsed", common.PrettyDuration(time.Since(start)))
+ case <-done:
+ return
+ }
}
+ }()
+
+ // Inspect key-value database in parallel.
+ for i := 0; i < 256; i++ {
+ eg.Go(func() error { return inspectRange(ctx, byte(i)) })
+ }
+
+ if err := eg.Wait(); err != nil {
+ close(done)
+ return err
}
+ close(done)
+
// Display the database statistic of key-value store.
stats := [][]string{
- {"Key-Value store", "Headers", headers.Size(), headers.Count()},
- {"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
- {"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
- {"Key-Value store", "Difficulties (deprecated)", tds.Size(), tds.Count()},
- {"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
- {"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
- {"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
- {"Key-Value store", "Log index filter-map rows", filterMapRows.Size(), filterMapRows.Count()},
- {"Key-Value store", "Log index last-block-of-map", filterMapLastBlock.Size(), filterMapLastBlock.Count()},
- {"Key-Value store", "Log index block-lv", filterMapBlockLV.Size(), filterMapBlockLV.Count()},
- {"Key-Value store", "Log bloombits (deprecated)", bloomBits.Size(), bloomBits.Count()},
- {"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
- {"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()},
- {"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()},
- {"Key-Value store", "Path trie account nodes", accountTries.Size(), accountTries.Count()},
- {"Key-Value store", "Path trie storage nodes", storageTries.Size(), storageTries.Count()},
- {"Key-Value store", "Path state history indexes", stateIndex.Size(), stateIndex.Count()},
- {"Key-Value store", "Verkle trie nodes", verkleTries.Size(), verkleTries.Count()},
- {"Key-Value store", "Verkle trie state lookups", verkleStateLookups.Size(), verkleStateLookups.Count()},
- {"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
- {"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
- {"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
- {"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
- {"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
- {"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
+ {"Key-Value store", "Headers", headers.sizeString(), headers.countString()},
+ {"Key-Value store", "Bodies", bodies.sizeString(), bodies.countString()},
+ {"Key-Value store", "Receipt lists", receipts.sizeString(), receipts.countString()},
+ {"Key-Value store", "Difficulties (deprecated)", tds.sizeString(), tds.countString()},
+ {"Key-Value store", "Block number->hash", numHashPairings.sizeString(), numHashPairings.countString()},
+ {"Key-Value store", "Block hash->number", hashNumPairings.sizeString(), hashNumPairings.countString()},
+ {"Key-Value store", "Transaction index", txLookups.sizeString(), txLookups.countString()},
+ {"Key-Value store", "Log index filter-map rows", filterMapRows.sizeString(), filterMapRows.countString()},
+ {"Key-Value store", "Log index last-block-of-map", filterMapLastBlock.sizeString(), filterMapLastBlock.countString()},
+ {"Key-Value store", "Log index block-lv", filterMapBlockLV.sizeString(), filterMapBlockLV.countString()},
+ {"Key-Value store", "Log bloombits (deprecated)", bloomBits.sizeString(), bloomBits.countString()},
+ {"Key-Value store", "Contract codes", codes.sizeString(), codes.countString()},
+ {"Key-Value store", "Hash trie nodes", legacyTries.sizeString(), legacyTries.countString()},
+ {"Key-Value store", "Path trie state lookups", stateLookups.sizeString(), stateLookups.countString()},
+ {"Key-Value store", "Path trie account nodes", accountTries.sizeString(), accountTries.countString()},
+ {"Key-Value store", "Path trie storage nodes", storageTries.sizeString(), storageTries.countString()},
+ {"Key-Value store", "Path state history indexes", stateIndex.sizeString(), stateIndex.countString()},
+ {"Key-Value store", "Verkle trie nodes", verkleTries.sizeString(), verkleTries.countString()},
+ {"Key-Value store", "Verkle trie state lookups", verkleStateLookups.sizeString(), verkleStateLookups.countString()},
+ {"Key-Value store", "Trie preimages", preimages.sizeString(), preimages.countString()},
+ {"Key-Value store", "Account snapshot", accountSnaps.sizeString(), accountSnaps.countString()},
+ {"Key-Value store", "Storage snapshot", storageSnaps.sizeString(), storageSnaps.countString()},
+ {"Key-Value store", "Beacon sync headers", beaconHeaders.sizeString(), beaconHeaders.countString()},
+ {"Key-Value store", "Clique snapshots", cliqueSnaps.sizeString(), cliqueSnaps.countString()},
+ {"Key-Value store", "Singleton metadata", metadata.sizeString(), metadata.countString()},
}
+
// Inspect all registered append-only file store then.
ancients, err := inspectFreezers(db)
if err != nil {
@@ -580,16 +640,17 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
fmt.Sprintf("%d", ancient.count()),
})
}
- total += ancient.size()
+ total.Add(uint64(ancient.size()))
}
- table := tablewriter.NewWriter(os.Stdout)
+
+ table := newTableWriter(os.Stdout)
table.SetHeader([]string{"Database", "Category", "Size", "Items"})
- table.SetFooter([]string{"", "Total", total.String(), " "})
+ table.SetFooter([]string{"", "Total", common.StorageSize(total.Load()).String(), fmt.Sprintf("%d", count.Load())})
table.AppendBulk(stats)
table.Render()
- if unaccounted.size > 0 {
- log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
+ if !unaccounted.empty() {
+ log.Error("Database contains unaccounted data", "size", unaccounted.sizeString(), "count", unaccounted.countString())
for _, e := range slices.SortedFunc(maps.Values(unaccountedKeys), bytes.Compare) {
log.Error(fmt.Sprintf(" example key: %x", e))
}
@@ -604,7 +665,7 @@ var knownMetadataKeys = [][]byte{
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
- filterMapsRangeKey, headStateHistoryIndexKey,
+ filterMapsRangeKey, headStateHistoryIndexKey, VerkleTransitionStatePrefix,
}
// printChainMetadata prints out chain metadata to stderr.
diff --git a/core/rawdb/database_tablewriter_tinygo.go b/core/rawdb/database_tablewriter_tinygo.go
new file mode 100644
index 0000000000..2f8e456fd5
--- /dev/null
+++ b/core/rawdb/database_tablewriter_tinygo.go
@@ -0,0 +1,208 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+// TODO: naive stub implementation for tablewriter
+
+//go:build tinygo
+// +build tinygo
+
+package rawdb
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+const (
+ cellPadding = 1 // Number of spaces on each side of cell content
+ totalPadding = 2 * cellPadding // Total padding per cell. It's doubled because we pad equally on both sides
+)
+
+type Table struct {
+ out io.Writer
+ headers []string
+ footer []string
+ rows [][]string
+}
+
+func newTableWriter(w io.Writer) *Table {
+ return &Table{out: w}
+}
+
+// SetHeader sets the header row for the table. Headers define the column names
+// and determine the number of columns for the entire table.
+//
+// All data rows and footer must have the same number of columns as the headers.
+//
+// Note: Headers are required - tables without headers will fail validation.
+func (t *Table) SetHeader(headers []string) {
+ t.headers = headers
+}
+
+// SetFooter sets an optional footer row for the table.
+//
+// The footer must have the same number of columns as the headers, or validation will fail.
+func (t *Table) SetFooter(footer []string) {
+ t.footer = footer
+}
+
+// AppendBulk sets all data rows for the table at once, replacing any existing rows.
+//
+// Each row must have the same number of columns as the headers, or validation
+// will fail during Render().
+func (t *Table) AppendBulk(rows [][]string) {
+ t.rows = rows
+}
+
+// Render outputs the complete table to the configured writer. The table is rendered
+// with headers, data rows, and optional footer.
+//
+// If validation fails, an error message is written to the output and rendering stops.
+func (t *Table) Render() {
+ if err := t.render(); err != nil {
+ fmt.Fprintf(t.out, "Error: %v\n", err)
+ return
+ }
+}
+
+func (t *Table) render() error {
+ if err := t.validateColumnCount(); err != nil {
+ return err
+ }
+
+ widths := t.calculateColumnWidths()
+ rowSeparator := t.buildRowSeparator(widths)
+
+ if len(t.headers) > 0 {
+ t.printRow(t.headers, widths)
+ fmt.Fprintln(t.out, rowSeparator)
+ }
+
+ for _, row := range t.rows {
+ t.printRow(row, widths)
+ }
+
+ if len(t.footer) > 0 {
+ fmt.Fprintln(t.out, rowSeparator)
+ t.printRow(t.footer, widths)
+ }
+
+ return nil
+}
+
+// validateColumnCount checks that all rows and footer match the header column count
+func (t *Table) validateColumnCount() error {
+ if len(t.headers) == 0 {
+ return errors.New("table must have headers")
+ }
+
+ expectedCols := len(t.headers)
+
+ // Check all rows have same column count as headers
+ for i, row := range t.rows {
+ if len(row) != expectedCols {
+ return fmt.Errorf("row %d has %d columns, expected %d", i, len(row), expectedCols)
+ }
+ }
+
+ // Check footer has same column count as headers (if present)
+ footerPresent := len(t.footer) > 0
+ if footerPresent && len(t.footer) != expectedCols {
+ return fmt.Errorf("footer has %d columns, expected %d", len(t.footer), expectedCols)
+ }
+
+ return nil
+}
+
+// calculateColumnWidths determines the minimum width needed for each column.
+//
+// This is done by finding the longest content in each column across headers, rows, and footer.
+//
+// Returns an int slice where widths[i] is the display width for column i (including padding).
+func (t *Table) calculateColumnWidths() []int {
+ // Headers define the number of columns
+ cols := len(t.headers)
+ if cols == 0 {
+ return nil
+ }
+
+ // Track maximum content width for each column (before padding)
+ widths := make([]int, cols)
+
+ // Start with header widths
+ for i, h := range t.headers {
+ widths[i] = len(h)
+ }
+
+ // Find max width needed for data cells in each column
+ for _, row := range t.rows {
+ for i, cell := range row {
+ widths[i] = max(widths[i], len(cell))
+ }
+ }
+
+ // Find max width needed for footer in each column
+ for i, f := range t.footer {
+ widths[i] = max(widths[i], len(f))
+ }
+
+ for i := range widths {
+ widths[i] += totalPadding
+ }
+
+ return widths
+}
+
+// buildRowSeparator creates a horizontal line to separate table rows.
+//
+// It generates a string with dashes (-) for each column width, joined by plus signs (+).
+//
+// Example output: "----------+--------+-----------"
+func (t *Table) buildRowSeparator(widths []int) string {
+ parts := make([]string, len(widths))
+ for i, w := range widths {
+ parts[i] = strings.Repeat("-", w)
+ }
+ return strings.Join(parts, "+")
+}
+
+// printRow outputs a single row to the table writer.
+//
+// Each cell is padded with spaces and separated by pipe characters (|).
+//
+// Example output: " Database | Size | Items "
+func (t *Table) printRow(row []string, widths []int) {
+ for i, cell := range row {
+ if i > 0 {
+ fmt.Fprint(t.out, "|")
+ }
+
+ // Compute the extra centering space inside the content area (the fixed cell padding is excluded here)
+ contentWidth := widths[i] - totalPadding
+ cellLen := len(cell)
+ leftPadCentering := (contentWidth - cellLen) / 2
+ rightPadCentering := contentWidth - cellLen - leftPadCentering
+
+ // Build padded cell with centering
+ leftPadding := strings.Repeat(" ", cellPadding+leftPadCentering)
+ rightPadding := strings.Repeat(" ", cellPadding+rightPadCentering)
+
+ fmt.Fprintf(t.out, "%s%s%s", leftPadding, cell, rightPadding)
+ }
+ fmt.Fprintln(t.out)
+}
diff --git a/core/rawdb/database_tablewriter_tinygo_test.go b/core/rawdb/database_tablewriter_tinygo_test.go
new file mode 100644
index 0000000000..3bcf93832b
--- /dev/null
+++ b/core/rawdb/database_tablewriter_tinygo_test.go
@@ -0,0 +1,124 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+//go:build tinygo
+// +build tinygo
+
+package rawdb
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+func TestTableWriterTinyGo(t *testing.T) {
+ var buf bytes.Buffer
+ table := newTableWriter(&buf)
+
+ headers := []string{"Database", "Size", "Items", "Status"}
+ rows := [][]string{
+ {"chaindata", "2.5 GB", "1,234,567", "Active"},
+ {"state", "890 MB", "456,789", "Active"},
+ {"ancient", "15.2 GB", "2,345,678", "Readonly"},
+ {"logs", "120 MB", "89,012", "Active"},
+ }
+ footer := []string{"Total", "18.71 GB", "4,125,046", "-"}
+
+ table.SetHeader(headers)
+ table.AppendBulk(rows)
+ table.SetFooter(footer)
+ table.Render()
+
+ output := buf.String()
+ t.Logf("Table output using custom stub implementation:\n%s", output)
+}
+
+func TestTableWriterValidationErrors(t *testing.T) {
+ // Test missing headers
+ t.Run("MissingHeaders", func(t *testing.T) {
+ var buf bytes.Buffer
+ table := newTableWriter(&buf)
+
+ rows := [][]string{{"x", "y", "z"}}
+
+ table.AppendBulk(rows)
+ table.Render()
+
+ output := buf.String()
+ if !strings.Contains(output, "table must have headers") {
+ t.Errorf("Expected error for missing headers, got: %s", output)
+ }
+ })
+
+ t.Run("NotEnoughRowColumns", func(t *testing.T) {
+ var buf bytes.Buffer
+ table := newTableWriter(&buf)
+
+ headers := []string{"A", "B", "C"}
+ badRows := [][]string{
+ {"x", "y"}, // Missing column
+ }
+
+ table.SetHeader(headers)
+ table.AppendBulk(badRows)
+ table.Render()
+
+ output := buf.String()
+ if !strings.Contains(output, "row 0 has 2 columns, expected 3") {
+ t.Errorf("Expected validation error for row 0, got: %s", output)
+ }
+ })
+
+ t.Run("TooManyRowColumns", func(t *testing.T) {
+ var buf bytes.Buffer
+ table := newTableWriter(&buf)
+
+ headers := []string{"A", "B", "C"}
+ badRows := [][]string{
+ {"p", "q", "r", "s"}, // Extra column
+ }
+
+ table.SetHeader(headers)
+ table.AppendBulk(badRows)
+ table.Render()
+
+ output := buf.String()
+ if !strings.Contains(output, "row 0 has 4 columns, expected 3") {
+ t.Errorf("Expected validation error for row 0, got: %s", output)
+ }
+ })
+
+ // Test mismatched footer columns
+ t.Run("MismatchedFooterColumns", func(t *testing.T) {
+ var buf bytes.Buffer
+ table := newTableWriter(&buf)
+
+ headers := []string{"A", "B", "C"}
+ rows := [][]string{{"x", "y", "z"}}
+ footer := []string{"total", "sum"} // Missing column
+
+ table.SetHeader(headers)
+ table.AppendBulk(rows)
+ table.SetFooter(footer)
+ table.Render()
+
+ output := buf.String()
+ if !strings.Contains(output, "footer has 2 columns, expected 3") {
+ t.Errorf("Expected validation error for footer, got: %s", output)
+ }
+ })
+}
diff --git a/rlp/unsafe.go b/core/rawdb/database_tablewriter_unix.go
similarity index 71%
rename from rlp/unsafe.go
rename to core/rawdb/database_tablewriter_unix.go
index 10868caaf2..8bec5396e8 100644
--- a/rlp/unsafe.go
+++ b/core/rawdb/database_tablewriter_unix.go
@@ -1,4 +1,4 @@
-// Copyright 2021 The go-ethereum Authors
+// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -14,17 +14,20 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-//go:build !nacl && !js && cgo
-// +build !nacl,!js,cgo
+//go:build !tinygo
+// +build !tinygo
-package rlp
+package rawdb
import (
- "reflect"
- "unsafe"
+ "io"
+
+ "github.com/olekukonko/tablewriter"
)
-// byteArrayBytes returns a slice of the byte array v.
-func byteArrayBytes(v reflect.Value, length int) []byte {
- return unsafe.Slice((*byte)(unsafe.Pointer(v.UnsafeAddr())), length)
+// Re-export the real tablewriter types and functions
+type Table = tablewriter.Table
+
+func newTableWriter(w io.Writer) *Table {
+ return tablewriter.NewWriter(w)
}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 388a08f243..3588063468 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -158,6 +158,9 @@ var (
preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
preimageHitsCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
preimageMissCounter = metrics.NewRegisteredCounter("db/preimage/miss", nil)
+
+ // Verkle transition information
+ VerkleTransitionStatePrefix = []byte("verkle-transition-state-")
)
// LegacyTxLookupEntry is the legacy TxLookupEntry definition with some unnecessary
@@ -381,19 +384,51 @@ func accountHistoryIndexKey(addressHash common.Hash) []byte {
// storageHistoryIndexKey = StateHistoryStorageMetadataPrefix + addressHash + storageHash
func storageHistoryIndexKey(addressHash common.Hash, storageHash common.Hash) []byte {
- return append(append(StateHistoryStorageMetadataPrefix, addressHash.Bytes()...), storageHash.Bytes()...)
+ totalLen := len(StateHistoryStorageMetadataPrefix) + 2*common.HashLength
+ out := make([]byte, totalLen)
+
+ off := 0
+ off += copy(out[off:], StateHistoryStorageMetadataPrefix)
+ off += copy(out[off:], addressHash.Bytes())
+ copy(out[off:], storageHash.Bytes())
+
+ return out
}
// accountHistoryIndexBlockKey = StateHistoryAccountBlockPrefix + addressHash + blockID
func accountHistoryIndexBlockKey(addressHash common.Hash, blockID uint32) []byte {
- var buf [4]byte
- binary.BigEndian.PutUint32(buf[:], blockID)
- return append(append(StateHistoryAccountBlockPrefix, addressHash.Bytes()...), buf[:]...)
+ var buf4 [4]byte
+ binary.BigEndian.PutUint32(buf4[:], blockID)
+
+ totalLen := len(StateHistoryAccountBlockPrefix) + common.HashLength + 4
+ out := make([]byte, totalLen)
+
+ off := 0
+ off += copy(out[off:], StateHistoryAccountBlockPrefix)
+ off += copy(out[off:], addressHash.Bytes())
+ copy(out[off:], buf4[:])
+
+ return out
}
// storageHistoryIndexBlockKey = StateHistoryStorageBlockPrefix + addressHash + storageHash + blockID
func storageHistoryIndexBlockKey(addressHash common.Hash, storageHash common.Hash, blockID uint32) []byte {
- var buf [4]byte
- binary.BigEndian.PutUint32(buf[:], blockID)
- return append(append(append(StateHistoryStorageBlockPrefix, addressHash.Bytes()...), storageHash.Bytes()...), buf[:]...)
+ var buf4 [4]byte
+ binary.BigEndian.PutUint32(buf4[:], blockID)
+
+ totalLen := len(StateHistoryStorageBlockPrefix) + 2*common.HashLength + 4
+ out := make([]byte, totalLen)
+
+ off := 0
+ off += copy(out[off:], StateHistoryStorageBlockPrefix)
+ off += copy(out[off:], addressHash.Bytes())
+ off += copy(out[off:], storageHash.Bytes())
+ copy(out[off:], buf4[:])
+
+ return out
+}
+
+// transitionStateKey = VerkleTransitionStatePrefix + hash
+func transitionStateKey(hash common.Hash) []byte {
+ return append(VerkleTransitionStatePrefix, hash.Bytes()...)
}
diff --git a/core/state/database.go b/core/state/database.go
index 6f9250a0c6..5fff5c4c89 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
+ "github.com/ethereum/go-ethereum/core/overlay"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
@@ -80,11 +81,19 @@ type Trie interface {
// be returned.
GetAccount(address common.Address) (*types.StateAccount, error)
+ // PrefetchAccount attempts to resolve specific accounts from the database
+ // to accelerate subsequent trie operations.
+ PrefetchAccount([]common.Address) error
+
// GetStorage returns the value for key stored in the trie. The value bytes
// must not be modified by the caller. If a node was not found in the database,
// a trie.MissingNodeError is returned.
GetStorage(addr common.Address, key []byte) ([]byte, error)
+ // PrefetchStorage attempts to resolve specific storage slots from the database
+ // to accelerate subsequent trie operations.
+ PrefetchStorage(addr common.Address, keys [][]byte) error
+
// UpdateAccount abstracts an account write to the trie. It encodes the
// provided account object with associated algorithm and then updates it
// in the trie with provided address.
@@ -121,7 +130,7 @@ type Trie interface {
// Witness returns a set containing all trie nodes that have been accessed.
// The returned map could be nil if the witness is empty.
- Witness() map[string]struct{}
+ Witness() map[string][]byte
// NodeIterator returns an iterator that returns nodes of the trie. Iteration
// starts at the key after the given start key. And error will be returned
@@ -151,17 +160,21 @@ type CachingDB struct {
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
codeSizeCache *lru.Cache[common.Hash, int]
pointCache *utils.PointCache
+
+ // Transition-specific fields
+ TransitionStatePerRoot *lru.Cache[common.Hash, *overlay.TransitionState]
}
// NewDatabase creates a state database with the provided data sources.
func NewDatabase(triedb *triedb.Database, snap *snapshot.Tree) *CachingDB {
return &CachingDB{
- disk: triedb.Disk(),
- triedb: triedb,
- snap: snap,
- codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
- codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
- pointCache: utils.NewPointCache(pointCacheSize),
+ disk: triedb.Disk(),
+ triedb: triedb,
+ snap: snap,
+ codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
+ codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
+ pointCache: utils.NewPointCache(pointCacheSize),
+ TransitionStatePerRoot: lru.NewCache[common.Hash, *overlay.TransitionState](1000),
}
}
@@ -224,7 +237,13 @@ func (db *CachingDB) ReadersWithCacheStats(stateRoot common.Hash) (ReaderWithSta
// OpenTrie opens the main account trie at a specific root hash.
func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
if db.triedb.IsVerkle() {
- return trie.NewVerkleTrie(root, db.triedb, db.pointCache)
+ ts := overlay.LoadTransitionState(db.TrieDB().Disk(), root, db.triedb.IsVerkle())
+ if ts.InTransition() {
+ panic("transition isn't supported yet")
+ }
+ if ts.Transitioned() {
+ return trie.NewVerkleTrie(root, db.triedb, db.pointCache)
+ }
}
tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb)
if err != nil {
@@ -235,9 +254,6 @@ func (db *CachingDB) OpenTrie(root common.Hash) (Trie, error) {
// OpenStorageTrie opens the storage trie of an account.
func (db *CachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) {
- // In the verkle case, there is only one tree. But the two-tree structure
- // is hardcoded in the codebase. So we need to return the same trie in this
- // case.
if db.triedb.IsVerkle() {
return self, nil
}
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index a63307e38d..2bfe3d06af 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -160,11 +160,8 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
var eta time.Duration // Realistically will never remain uninited
if done := binary.BigEndian.Uint64(key[:8]); done > 0 {
- var (
- left = math.MaxUint64 - binary.BigEndian.Uint64(key[:8])
- speed = done/uint64(time.Since(pstart)/time.Millisecond+1) + 1 // +1s to avoid division by zero
- )
- eta = time.Duration(left/speed) * time.Millisecond
+ left := math.MaxUint64 - binary.BigEndian.Uint64(key[:8])
+ eta = common.CalculateETA(done, left, time.Since(pstart))
}
if time.Since(logged) > 8*time.Second {
log.Info("Pruning state data", "nodes", count, "skipped", skipped, "size", size,
diff --git a/core/state/reader.go b/core/state/reader.go
index fe24831a6b..b2a0d14855 100644
--- a/core/state/reader.go
+++ b/core/state/reader.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
+ "github.com/ethereum/go-ethereum/core/overlay"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@@ -231,7 +232,7 @@ type trieReader struct {
lock sync.Mutex // Lock for protecting concurrent read
}
-// trieReader constructs a trie reader of the specific state. An error will be
+// newTrieReader constructs a trie reader of the specific state. An error will be
// returned if the associated trie specified by root is not existent.
func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCache) (*trieReader, error) {
var (
@@ -242,6 +243,18 @@ func newTrieReader(root common.Hash, db *triedb.Database, cache *utils.PointCach
tr, err = trie.NewStateTrie(trie.StateTrieID(root), db)
} else {
tr, err = trie.NewVerkleTrie(root, db, cache)
+
+ // Based on the transition status, determine if the overlay
+ // tree needs to be created, or if a single, target tree is
+ // to be picked.
+ ts := overlay.LoadTransitionState(db.Disk(), root, true)
+ if ts.InTransition() {
+ mpt, err := trie.NewStateTrie(trie.StateTrieID(ts.BaseRoot), db)
+ if err != nil {
+ return nil, err
+ }
+ tr = trie.NewTransitionTrie(mpt, tr.(*trie.VerkleTrie), false)
+ }
}
if err != nil {
return nil, err
diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go
index 4b0774f2ae..0d39687be4 100644
--- a/core/state/snapshot/conversion.go
+++ b/core/state/snapshot/conversion.go
@@ -171,20 +171,16 @@ func (stat *generateStats) report() {
// If there's progress on the account trie, estimate the time to finish crawling it
if done := binary.BigEndian.Uint64(stat.head[:8]) / stat.accounts; done > 0 {
var (
- left = (math.MaxUint64 - binary.BigEndian.Uint64(stat.head[:8])) / stat.accounts
- speed = done/uint64(time.Since(stat.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
- eta = time.Duration(left/speed) * time.Millisecond
+ left = (math.MaxUint64 - binary.BigEndian.Uint64(stat.head[:8])) / stat.accounts
+ eta = common.CalculateETA(done, left, time.Since(stat.start))
)
// If there are large contract crawls in progress, estimate their finish time
for acc, head := range stat.slotsHead {
start := stat.slotsStart[acc]
if done := binary.BigEndian.Uint64(head[:8]); done > 0 {
- var (
- left = math.MaxUint64 - binary.BigEndian.Uint64(head[:8])
- speed = done/uint64(time.Since(start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
- )
+ left := math.MaxUint64 - binary.BigEndian.Uint64(head[:8])
// Override the ETA if larger than the largest until now
- if slotETA := time.Duration(left/speed) * time.Millisecond; eta < slotETA {
+ if slotETA := common.CalculateETA(done, left, time.Since(start)); eta < slotETA {
eta = slotETA
}
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 59861b7072..33bfff0ebe 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -135,7 +135,8 @@ type StateDB struct {
journal *journal
// State witness if cross validation is needed
- witness *stateless.Witness
+ witness *stateless.Witness
+ witnessStats *stateless.WitnessStats
// Measurements gathered during execution for debugging purposes
AccountReads time.Duration
@@ -197,12 +198,13 @@ func (s *StateDB) MakeSinglethreaded() {
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
// state trie concurrently while the state is mutated so that when we reach the
// commit phase, most of the needed data is already hot.
-func (s *StateDB) StartPrefetcher(namespace string, witness *stateless.Witness) {
+func (s *StateDB) StartPrefetcher(namespace string, witness *stateless.Witness, witnessStats *stateless.WitnessStats) {
// Terminate any previously running prefetcher
s.StopPrefetcher()
// Enable witness collection if requested
s.witness = witness
+ s.witnessStats = witnessStats
// With the switch to the Proof-of-Stake consensus algorithm, block production
// rewards are now handled at the consensus layer. Consequently, a block may
@@ -898,9 +900,17 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
continue
}
if trie := obj.getPrefetchedTrie(); trie != nil {
- s.witness.AddState(trie.Witness())
+ witness := trie.Witness()
+ s.witness.AddState(witness)
+ if s.witnessStats != nil {
+ s.witnessStats.Add(witness, obj.addrHash)
+ }
} else if obj.trie != nil {
- s.witness.AddState(obj.trie.Witness())
+ witness := obj.trie.Witness()
+ s.witness.AddState(witness)
+ if s.witnessStats != nil {
+ s.witnessStats.Add(witness, obj.addrHash)
+ }
}
}
// Pull in only-read and non-destructed trie witnesses
@@ -914,9 +924,17 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
continue
}
if trie := obj.getPrefetchedTrie(); trie != nil {
- s.witness.AddState(trie.Witness())
+ witness := trie.Witness()
+ s.witness.AddState(witness)
+ if s.witnessStats != nil {
+ s.witnessStats.Add(witness, obj.addrHash)
+ }
} else if obj.trie != nil {
- s.witness.AddState(obj.trie.Witness())
+ witness := obj.trie.Witness()
+ s.witness.AddState(witness)
+ if s.witnessStats != nil {
+ s.witnessStats.Add(witness, obj.addrHash)
+ }
}
}
}
@@ -982,7 +1000,11 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// If witness building is enabled, gather the account trie witness
if s.witness != nil {
- s.witness.AddState(s.trie.Witness())
+ witness := s.trie.Witness()
+ s.witness.AddState(witness)
+ if s.witnessStats != nil {
+ s.witnessStats.Add(witness, common.Hash{})
+ }
}
return hash
}
@@ -1017,7 +1039,7 @@ func (s *StateDB) fastDeleteStorage(snaps *snapshot.Tree, addrHash common.Hash,
storageOrigins = make(map[common.Hash][]byte) // the set for tracking the original value of slot
)
stack := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
- nodes.AddNode(path, trienode.NewDeleted())
+ nodes.AddNode(path, trienode.NewDeletedWithPrev(blob))
})
for iter.Next() {
slot := common.CopyBytes(iter.Slot())
@@ -1068,7 +1090,7 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
if it.Hash() == (common.Hash{}) {
continue
}
- nodes.AddNode(it.Path(), trienode.NewDeleted())
+ nodes.AddNode(it.Path(), trienode.NewDeletedWithPrev(it.NodeBlob()))
}
if err := it.Error(); err != nil {
return nil, nil, nil, err
@@ -1200,7 +1222,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateU
//
// Given that some accounts may be destroyed and then recreated within
// the same block, it's possible that a node set with the same owner
- // may already exists. In such cases, these two sets are combined, with
+ // may already exist. In such cases, these two sets are combined, with
// the later one overwriting the previous one if any nodes are modified
// or deleted in both sets.
//
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index 6f492cf9f2..a9faddcdff 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -388,6 +388,10 @@ func (sf *subfetcher) loop() {
sf.tasks = nil
sf.lock.Unlock()
+ var (
+ addresses []common.Address
+ slots [][]byte
+ )
for _, task := range tasks {
if task.addr != nil {
key := *task.addr
@@ -400,6 +404,7 @@ func (sf *subfetcher) loop() {
sf.dupsCross++
continue
}
+ sf.seenReadAddr[key] = struct{}{}
} else {
if _, ok := sf.seenReadAddr[key]; ok {
sf.dupsCross++
@@ -409,7 +414,9 @@ func (sf *subfetcher) loop() {
sf.dupsWrite++
continue
}
+ sf.seenWriteAddr[key] = struct{}{}
}
+ addresses = append(addresses, *task.addr)
} else {
key := *task.slot
if task.read {
@@ -421,6 +428,7 @@ func (sf *subfetcher) loop() {
sf.dupsCross++
continue
}
+ sf.seenReadSlot[key] = struct{}{}
} else {
if _, ok := sf.seenReadSlot[key]; ok {
sf.dupsCross++
@@ -430,25 +438,19 @@ func (sf *subfetcher) loop() {
sf.dupsWrite++
continue
}
+ sf.seenWriteSlot[key] = struct{}{}
}
+ slots = append(slots, key.Bytes())
}
- if task.addr != nil {
- sf.trie.GetAccount(*task.addr)
- } else {
- sf.trie.GetStorage(sf.addr, (*task.slot)[:])
+ }
+ if len(addresses) != 0 {
+ if err := sf.trie.PrefetchAccount(addresses); err != nil {
+ log.Error("Failed to prefetch accounts", "err", err)
}
- if task.read {
- if task.addr != nil {
- sf.seenReadAddr[*task.addr] = struct{}{}
- } else {
- sf.seenReadSlot[*task.slot] = struct{}{}
- }
- } else {
- if task.addr != nil {
- sf.seenWriteAddr[*task.addr] = struct{}{}
- } else {
- sf.seenWriteSlot[*task.slot] = struct{}{}
- }
+ }
+ if len(slots) != 0 {
+ if err := sf.trie.PrefetchStorage(sf.addr, slots); err != nil {
+ log.Error("Failed to prefetch storage", "err", err)
}
}
diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go
index 45e098070a..2e55b89dfe 100644
--- a/core/state_prefetcher.go
+++ b/core/state_prefetcher.go
@@ -111,12 +111,6 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
fails.Add(1)
return nil // Ugh, something went horribly wrong, bail out
}
- // Pre-load trie nodes for the intermediate root.
- //
- // This operation incurs significant memory allocations due to
- // trie hashing and node decoding. TODO(rjl493456442): investigate
- // ways to mitigate this overhead.
- stateCpy.IntermediateRoot(true)
return nil
})
}
diff --git a/core/state_processor.go b/core/state_processor.go
index ba4e60f91d..c29d0c24de 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -113,15 +113,15 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
requests = [][]byte{}
// EIP-6110
if err := ParseDepositLogs(&requests, allLogs, p.config); err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to parse deposit logs: %w", err)
}
// EIP-7002
if err := ProcessWithdrawalQueue(&requests, evm); err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to process withdrawal queue: %w", err)
}
// EIP-7251
if err := ProcessConsolidationQueue(&requests, evm); err != nil {
- return nil, err
+ return nil, fmt.Errorf("failed to process consolidation queue: %w", err)
}
}
@@ -142,7 +142,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// ApplyTransactionWithEVM attempts to apply a transaction to the given state database
// and uses the input parameters for its environment similar to ApplyTransaction. However,
-// this method takes an already created EVM instanceproto as input.
+// this method takes an already created EVM instance as input.
func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, blockTime uint64, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (receipt *types.Receipt, err error) {
if hooks := evm.Config.Tracer; hooks != nil {
if hooks.OnTxStart != nil {
diff --git a/core/stateless/stats.go b/core/stateless/stats.go
new file mode 100644
index 0000000000..adc898929b
--- /dev/null
+++ b/core/stateless/stats.go
@@ -0,0 +1,117 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package stateless
+
+import (
+ "maps"
+ "slices"
+ "sort"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+var (
+ accountTrieDepthAvg = metrics.NewRegisteredGauge("witness/trie/account/depth/avg", nil)
+ accountTrieDepthMin = metrics.NewRegisteredGauge("witness/trie/account/depth/min", nil)
+ accountTrieDepthMax = metrics.NewRegisteredGauge("witness/trie/account/depth/max", nil)
+
+ storageTrieDepthAvg = metrics.NewRegisteredGauge("witness/trie/storage/depth/avg", nil)
+ storageTrieDepthMin = metrics.NewRegisteredGauge("witness/trie/storage/depth/min", nil)
+ storageTrieDepthMax = metrics.NewRegisteredGauge("witness/trie/storage/depth/max", nil)
+)
+
+// depthStats tracks min/avg/max statistics for trie access depths.
+type depthStats struct {
+ totalDepth int64
+ samples int64
+ minDepth int64
+ maxDepth int64
+}
+
+// newDepthStats creates a new depthStats with default values.
+func newDepthStats() *depthStats {
+ return &depthStats{minDepth: -1}
+}
+
+// add records a new depth sample.
+func (d *depthStats) add(n int64) {
+ if n < 0 {
+ return
+ }
+ d.totalDepth += n
+ d.samples++
+
+ if d.minDepth == -1 || n < d.minDepth {
+ d.minDepth = n
+ }
+ if n > d.maxDepth {
+ d.maxDepth = n
+ }
+}
+
+// report uploads the collected statistics into the provided gauges.
+func (d *depthStats) report(maxGauge, minGauge, avgGauge *metrics.Gauge) {
+ if d.samples == 0 {
+ return
+ }
+ maxGauge.Update(d.maxDepth)
+ minGauge.Update(d.minDepth)
+ avgGauge.Update(d.totalDepth / d.samples)
+}
+
+// WitnessStats aggregates statistics for account and storage trie accesses.
+type WitnessStats struct {
+ accountTrie *depthStats
+ storageTrie *depthStats
+}
+
+// NewWitnessStats creates a new WitnessStats collector.
+func NewWitnessStats() *WitnessStats {
+ return &WitnessStats{
+ accountTrie: newDepthStats(),
+ storageTrie: newDepthStats(),
+ }
+}
+
+// Add records trie access depths from the given node paths.
+// If `owner` is the zero hash, accesses are attributed to the account trie;
+// otherwise, they are attributed to the storage trie of that account.
+func (s *WitnessStats) Add(nodes map[string][]byte, owner common.Hash) {
+ // Extract paths from the nodes map
+ paths := slices.Collect(maps.Keys(nodes))
+ sort.Strings(paths)
+
+ for i, path := range paths {
+ // If current path is a prefix of the next path, it's not a leaf.
+ // The last path is always a leaf.
+ if i == len(paths)-1 || !strings.HasPrefix(paths[i+1], paths[i]) {
+ if owner == (common.Hash{}) {
+ s.accountTrie.add(int64(len(path)))
+ } else {
+ s.storageTrie.add(int64(len(path)))
+ }
+ }
+ }
+}
+
+// ReportMetrics reports the collected statistics to the global metrics registry.
+func (s *WitnessStats) ReportMetrics() {
+ s.accountTrie.report(accountTrieDepthMax, accountTrieDepthMin, accountTrieDepthAvg)
+ s.storageTrie.report(storageTrieDepthMax, storageTrieDepthMin, storageTrieDepthAvg)
+}
diff --git a/core/stateless/stats_test.go b/core/stateless/stats_test.go
new file mode 100644
index 0000000000..51c78cc9c9
--- /dev/null
+++ b/core/stateless/stats_test.go
@@ -0,0 +1,207 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package stateless
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+func TestWitnessStatsAdd(t *testing.T) {
+ tests := []struct {
+ name string
+ nodes map[string][]byte
+ owner common.Hash
+ expectedAccountDepth int64
+ expectedStorageDepth int64
+ }{
+ {
+ name: "empty nodes",
+ nodes: map[string][]byte{},
+ owner: common.Hash{},
+ expectedAccountDepth: 0,
+ expectedStorageDepth: 0,
+ },
+ {
+ name: "single account trie leaf",
+ nodes: map[string][]byte{
+ "abc": []byte("data"),
+ },
+ owner: common.Hash{},
+ expectedAccountDepth: 3,
+ expectedStorageDepth: 0,
+ },
+ {
+ name: "account trie with internal nodes",
+ nodes: map[string][]byte{
+ "a": []byte("data1"),
+ "ab": []byte("data2"),
+ "abc": []byte("data3"),
+ },
+ owner: common.Hash{},
+ expectedAccountDepth: 3, // Only "abc" is a leaf
+ expectedStorageDepth: 0,
+ },
+ {
+ name: "multiple account trie branches",
+ nodes: map[string][]byte{
+ "a": []byte("data1"),
+ "ab": []byte("data2"),
+ "abc": []byte("data3"),
+ "b": []byte("data4"),
+ "bc": []byte("data5"),
+ "bcd": []byte("data6"),
+ },
+ owner: common.Hash{},
+ expectedAccountDepth: 6, // "abc" (3) + "bcd" (3) = 6
+ expectedStorageDepth: 0,
+ },
+ {
+ name: "siblings are all leaves",
+ nodes: map[string][]byte{
+ "aa": []byte("data1"),
+ "ab": []byte("data2"),
+ "ac": []byte("data3"),
+ },
+ owner: common.Hash{},
+ expectedAccountDepth: 6, // 2 + 2 + 2 = 6
+ expectedStorageDepth: 0,
+ },
+ {
+ name: "storage trie leaves",
+ nodes: map[string][]byte{
+ "1": []byte("data1"),
+ "12": []byte("data2"),
+ "123": []byte("data3"),
+ "124": []byte("data4"),
+ },
+ owner: common.HexToHash("0x1234"),
+ expectedAccountDepth: 0,
+ expectedStorageDepth: 6, // "123" (3) + "124" (3) = 6
+ },
+ {
+ name: "complex trie structure",
+ nodes: map[string][]byte{
+ "1": []byte("data1"),
+ "12": []byte("data2"),
+ "123": []byte("data3"),
+ "124": []byte("data4"),
+ "2": []byte("data5"),
+ "23": []byte("data6"),
+ "234": []byte("data7"),
+ "235": []byte("data8"),
+ "3": []byte("data9"),
+ },
+ owner: common.Hash{},
+ expectedAccountDepth: 13, // "123"(3) + "124"(3) + "234"(3) + "235"(3) + "3"(1) = 13
+ expectedStorageDepth: 0,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ stats := NewWitnessStats()
+ stats.Add(tt.nodes, tt.owner)
+
+ // Check account trie depth
+ if stats.accountTrie.totalDepth != tt.expectedAccountDepth {
+ t.Errorf("Account trie total depth = %d, want %d", stats.accountTrie.totalDepth, tt.expectedAccountDepth)
+ }
+
+ // Check storage trie depth
+ if stats.storageTrie.totalDepth != tt.expectedStorageDepth {
+ t.Errorf("Storage trie total depth = %d, want %d", stats.storageTrie.totalDepth, tt.expectedStorageDepth)
+ }
+ })
+ }
+}
+
+func TestWitnessStatsMinMax(t *testing.T) {
+ stats := NewWitnessStats()
+
+ // Add some account trie nodes with varying depths
+ stats.Add(map[string][]byte{
+ "a": []byte("data1"),
+ "ab": []byte("data2"),
+ "abc": []byte("data3"),
+ "abcd": []byte("data4"),
+ "abcde": []byte("data5"),
+ }, common.Hash{})
+
+ // Only "abcde" is a leaf (depth 5)
+ if stats.accountTrie.minDepth != 5 {
+ t.Errorf("Account trie min depth = %d, want %d", stats.accountTrie.minDepth, 5)
+ }
+ if stats.accountTrie.maxDepth != 5 {
+ t.Errorf("Account trie max depth = %d, want %d", stats.accountTrie.maxDepth, 5)
+ }
+
+ // Add more leaves with different depths
+ stats.Add(map[string][]byte{
+ "x": []byte("data6"),
+ "yz": []byte("data7"),
+ }, common.Hash{})
+
+ // Now we have leaves at depths 1, 2, and 5
+ if stats.accountTrie.minDepth != 1 {
+ t.Errorf("Account trie min depth after update = %d, want %d", stats.accountTrie.minDepth, 1)
+ }
+ if stats.accountTrie.maxDepth != 5 {
+ t.Errorf("Account trie max depth after update = %d, want %d", stats.accountTrie.maxDepth, 5)
+ }
+}
+
+func TestWitnessStatsAverage(t *testing.T) {
+ stats := NewWitnessStats()
+
+ // Add nodes that will create leaves at depths 2, 3, and 4
+ stats.Add(map[string][]byte{
+ "aa": []byte("data1"),
+ "bb": []byte("data2"),
+ "ccc": []byte("data3"),
+ "dddd": []byte("data4"),
+ }, common.Hash{})
+
+ // All are leaves: 2 + 2 + 3 + 4 = 11 total, 4 samples
+ expectedAvg := int64(11) / int64(4)
+ actualAvg := stats.accountTrie.totalDepth / stats.accountTrie.samples
+
+ if actualAvg != expectedAvg {
+ t.Errorf("Account trie average depth = %d, want %d", actualAvg, expectedAvg)
+ }
+}
+
+func BenchmarkWitnessStatsAdd(b *testing.B) {
+ // Create a realistic trie node structure
+ nodes := make(map[string][]byte)
+ for i := 0; i < 100; i++ {
+ base := string(rune('a' + i%26))
+ nodes[base] = []byte("data")
+ for j := 0; j < 9; j++ {
+ key := base + string(rune('0'+j))
+ nodes[key] = []byte("data")
+ }
+ }
+
+ stats := NewWitnessStats()
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ stats.Add(nodes, common.Hash{})
+ }
+}
diff --git a/core/stateless/witness.go b/core/stateless/witness.go
index aecfad1d52..371a128f48 100644
--- a/core/stateless/witness.go
+++ b/core/stateless/witness.go
@@ -58,7 +58,7 @@ func NewWitness(context *types.Header, chain HeaderReader) (*Witness, error) {
}
headers = append(headers, parent)
}
- // Create the wtness with a reconstructed gutted out block
+ // Create the witness with a reconstructed gutted out block
return &Witness{
context: context,
Headers: headers,
@@ -88,14 +88,16 @@ func (w *Witness) AddCode(code []byte) {
}
// AddState inserts a batch of MPT trie nodes into the witness.
-func (w *Witness) AddState(nodes map[string]struct{}) {
+func (w *Witness) AddState(nodes map[string][]byte) {
if len(nodes) == 0 {
return
}
w.lock.Lock()
defer w.lock.Unlock()
- maps.Copy(w.State, nodes)
+ for _, value := range nodes {
+ w.State[string(value)] = struct{}{}
+ }
}
// Copy deep-copies the witness object. Witness.Block isn't deep-copied as it
diff --git a/core/tracing/journal_test.go b/core/tracing/journal_test.go
index d9616a2ce8..99447e1e1d 100644
--- a/core/tracing/journal_test.go
+++ b/core/tracing/journal_test.go
@@ -293,7 +293,7 @@ func newTracerAllHooks() *tracerAllHooks {
t := &tracerAllHooks{hooksCalled: make(map[string]bool)}
// Initialize all hooks to false. We will use this to
// get total count of hooks.
- hooksType := reflect.TypeOf((*Hooks)(nil)).Elem()
+ hooksType := reflect.TypeFor[Hooks]()
for i := 0; i < hooksType.NumField(); i++ {
t.hooksCalled[hooksType.Field(i).Name] = false
}
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
index 7539b0c2cb..0e9c258ac4 100644
--- a/core/txpool/blobpool/blobpool.go
+++ b/core/txpool/blobpool/blobpool.go
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
@@ -54,6 +55,12 @@ const (
// tiny overflows causing all txs to move a shelf higher, wasting disk space.
txAvgSize = 4 * 1024
+ // txBlobOverhead is an approximation of the overhead that an additional blob
+ // has on transaction size. This is added to the slotter to avoid tiny
+ // overflows causing all txs to move a shelf higher, wasting disk space. A
+ // small buffer is added to the proof overhead.
+ txBlobOverhead = uint32(kzg4844.CellProofsPerBlob*len(kzg4844.Proof{}) + 64)
+
// txMaxSize is the maximum size a single transaction can have, outside
// the included blobs. Since blob transactions are pulled instead of pushed,
// and only a small metadata is kept in ram, the rest is on disk, there is
@@ -82,6 +89,10 @@ const (
// limboedTransactionStore is the subfolder containing the currently included
// but not yet finalized transaction blobs.
limboedTransactionStore = "limbo"
+
+ // storeVersion is the current slotter layout used for the billy.Database
+ // store.
+ storeVersion = 1
)
// blobTxMeta is the minimal subset of types.BlobTx necessary to validate and
@@ -388,6 +399,14 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser
}
p.head, p.state = head, state
+ // Create new slotter for pre-Osaka blob configuration.
+ slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
+
+ // See if we need to migrate the queue blob store after fusaka
+ slotter, err = tryMigrate(p.chain.Config(), slotter, queuedir)
+ if err != nil {
+ return err
+ }
// Index all transactions on disk and delete anything unprocessable
var fails []uint64
index := func(id uint64, size uint32, blob []byte) {
@@ -395,7 +414,6 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser
fails = append(fails, id)
}
}
- slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, slotter, index)
if err != nil {
return err
@@ -429,7 +447,7 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserver txpool.Reser
// Pool initialized, attach the blob limbo to it to track blobs included
// recently but not yet finalized
- p.limbo, err = newLimbo(limbodir, eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
+ p.limbo, err = newLimbo(p.chain.Config(), limbodir)
if err != nil {
p.Close()
return err
@@ -1303,32 +1321,86 @@ func (p *BlobPool) GetMetadata(hash common.Hash) *txpool.TxMetadata {
// GetBlobs returns a number of blobs and proofs for the given versioned hashes.
// This is a utility method for the engine API, enabling consensus clients to
// retrieve blobs from the pools directly instead of the network.
-func (p *BlobPool) GetBlobs(vhashes []common.Hash) []*types.BlobTxSidecar {
- sidecars := make([]*types.BlobTxSidecar, len(vhashes))
- for idx, vhash := range vhashes {
- // Retrieve the datastore item (in a short lock)
- p.lock.RLock()
- id, exists := p.lookup.storeidOfBlob(vhash)
- if !exists {
- p.lock.RUnlock()
+func (p *BlobPool) GetBlobs(vhashes []common.Hash, version byte) ([]*kzg4844.Blob, []kzg4844.Commitment, [][]kzg4844.Proof, error) {
+ var (
+ blobs = make([]*kzg4844.Blob, len(vhashes))
+ commitments = make([]kzg4844.Commitment, len(vhashes))
+ proofs = make([][]kzg4844.Proof, len(vhashes))
+
+ indices = make(map[common.Hash][]int)
+ filled = make(map[common.Hash]struct{})
+ )
+ for i, h := range vhashes {
+ indices[h] = append(indices[h], i)
+ }
+ for _, vhash := range vhashes {
+ // Skip duplicate vhash that was already resolved in a previous iteration
+ if _, ok := filled[vhash]; ok {
continue
}
- data, err := p.store.Get(id)
+ // Retrieve the corresponding blob tx with the vhash
+ p.lock.RLock()
+ txID, exists := p.lookup.storeidOfBlob(vhash)
p.lock.RUnlock()
-
- // After releasing the lock, try to fill any blobs requested
+ if !exists {
+ return nil, nil, nil, fmt.Errorf("blob with vhash %x is not found", vhash)
+ }
+ data, err := p.store.Get(txID)
if err != nil {
- log.Error("Tracked blob transaction missing from store", "id", id, "err", err)
- continue
+ return nil, nil, nil, err
}
- item := new(types.Transaction)
- if err = rlp.DecodeBytes(data, item); err != nil {
- log.Error("Blobs corrupted for traced transaction", "id", id, "err", err)
- continue
+
+ // Decode the blob transaction
+ tx := new(types.Transaction)
+ if err := rlp.DecodeBytes(data, tx); err != nil {
+ return nil, nil, nil, err
+ }
+ sidecar := tx.BlobTxSidecar()
+ if sidecar == nil {
+ return nil, nil, nil, fmt.Errorf("blob tx without sidecar %x", tx.Hash())
+ }
+ // Traverse the blobs in the transaction
+ for i, hash := range tx.BlobHashes() {
+ list, ok := indices[hash]
+ if !ok {
+ continue // non-interesting blob
+ }
+ var pf []kzg4844.Proof
+ switch version {
+ case types.BlobSidecarVersion0:
+ if sidecar.Version == types.BlobSidecarVersion0 {
+ pf = []kzg4844.Proof{sidecar.Proofs[i]}
+ } else {
+ proof, err := kzg4844.ComputeBlobProof(&sidecar.Blobs[i], sidecar.Commitments[i])
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ pf = []kzg4844.Proof{proof}
+ }
+ case types.BlobSidecarVersion1:
+ if sidecar.Version == types.BlobSidecarVersion0 {
+ cellProofs, err := kzg4844.ComputeCellProofs(&sidecar.Blobs[i])
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ pf = cellProofs
+ } else {
+ cellProofs, err := sidecar.CellProofsAt(i)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ pf = cellProofs
+ }
+ }
+ for _, index := range list {
+ blobs[index] = &sidecar.Blobs[i]
+ commitments[index] = sidecar.Commitments[i]
+ proofs[index] = pf
+ }
+ filled[hash] = struct{}{}
}
- sidecars[idx] = item.BlobTxSidecar()
}
- return sidecars
+ return blobs, commitments, proofs, nil
}
// AvailableBlobs returns the number of blobs that are available in the subpool.
@@ -1346,6 +1418,31 @@ func (p *BlobPool) AvailableBlobs(vhashes []common.Hash) int {
return available
}
+// convertSidecar converts the legacy sidecar in the submitted transactions
+// if Osaka fork has been activated.
+func (p *BlobPool) convertSidecar(txs []*types.Transaction) ([]*types.Transaction, []error) {
+ head := p.chain.CurrentBlock()
+ if !p.chain.Config().IsOsaka(head.Number, head.Time) {
+ return txs, make([]error, len(txs))
+ }
+ var errs []error
+ for _, tx := range txs {
+ sidecar := tx.BlobTxSidecar()
+ if sidecar == nil {
+ errs = append(errs, errors.New("missing sidecar in blob transaction"))
+ continue
+ }
+ if sidecar.Version == types.BlobSidecarVersion0 {
+ if err := sidecar.ToV1(); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ }
+ errs = append(errs, nil)
+ }
+ return txs, errs
+}
+
// Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restrictions).
//
@@ -1353,10 +1450,14 @@ func (p *BlobPool) AvailableBlobs(vhashes []common.Hash) int {
// related to the add is finished. Only use this during tests for determinism.
func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error {
var (
+ errs []error
adds = make([]*types.Transaction, 0, len(txs))
- errs = make([]error, len(txs))
)
+ txs, errs = p.convertSidecar(txs)
for i, tx := range txs {
+ if errs[i] != nil {
+ continue
+ }
errs[i] = p.add(tx)
if errs[i] == nil {
adds = append(adds, tx.WithoutBlobTxSidecar())
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index 71afae2e9a..551c854d9b 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -26,6 +26,8 @@ import (
"math/big"
"os"
"path/filepath"
+ "reflect"
+ "slices"
"sync"
"testing"
@@ -46,10 +48,12 @@ import (
)
var (
- testBlobs []*kzg4844.Blob
- testBlobCommits []kzg4844.Commitment
- testBlobProofs []kzg4844.Proof
- testBlobVHashes [][32]byte
+ testBlobs []*kzg4844.Blob
+ testBlobCommits []kzg4844.Commitment
+ testBlobProofs []kzg4844.Proof
+ testBlobCellProofs [][]kzg4844.Proof
+ testBlobVHashes [][32]byte
+ testBlobIndices = make(map[[32]byte]int)
)
const testMaxBlobsPerBlock = 6
@@ -65,7 +69,11 @@ func init() {
testBlobProof, _ := kzg4844.ComputeBlobProof(testBlob, testBlobCommit)
testBlobProofs = append(testBlobProofs, testBlobProof)
+ testBlobCellProof, _ := kzg4844.ComputeCellProofs(testBlob)
+ testBlobCellProofs = append(testBlobCellProofs, testBlobCellProof)
+
testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit)
+ testBlobIndices[testBlobVHash] = len(testBlobVHashes)
testBlobVHashes = append(testBlobVHashes, testBlobVHash)
}
}
@@ -216,7 +224,7 @@ func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64,
// makeMultiBlobTx is a utility method to construct a ramdom blob tx with
// certain number of blobs in its sidecar.
-func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobCount int, key *ecdsa.PrivateKey) *types.Transaction {
+func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobCount int, blobOffset int, key *ecdsa.PrivateKey, version byte) *types.Transaction {
var (
blobs []kzg4844.Blob
blobHashes []common.Hash
@@ -224,10 +232,15 @@ func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCa
proofs []kzg4844.Proof
)
for i := 0; i < blobCount; i++ {
- blobs = append(blobs, *testBlobs[i])
- commitments = append(commitments, testBlobCommits[i])
- proofs = append(proofs, testBlobProofs[i])
- blobHashes = append(blobHashes, testBlobVHashes[i])
+ blobs = append(blobs, *testBlobs[blobOffset+i])
+ commitments = append(commitments, testBlobCommits[blobOffset+i])
+ if version == types.BlobSidecarVersion0 {
+ proofs = append(proofs, testBlobProofs[blobOffset+i])
+ } else {
+ cellProofs, _ := kzg4844.ComputeCellProofs(testBlobs[blobOffset+i])
+ proofs = append(proofs, cellProofs...)
+ }
+ blobHashes = append(blobHashes, testBlobVHashes[blobOffset+i])
}
blobtx := &types.BlobTx{
ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID),
@@ -238,7 +251,7 @@ func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCa
BlobFeeCap: uint256.NewInt(blobFeeCap),
BlobHashes: blobHashes,
Value: uint256.NewInt(100),
- Sidecar: types.NewBlobTxSidecar(types.BlobSidecarVersion0, blobs, commitments, proofs),
+ Sidecar: types.NewBlobTxSidecar(version, blobs, commitments, proofs),
}
return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx)
}
@@ -396,51 +409,52 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
// whatever is in the pool, it can be retrieved correctly.
func verifyBlobRetrievals(t *testing.T, pool *BlobPool) {
// Collect all the blobs tracked by the pool
- known := make(map[common.Hash]struct{})
+ var (
+ hashes []common.Hash
+ known = make(map[common.Hash]struct{})
+ )
for _, txs := range pool.index {
for _, tx := range txs {
for _, vhash := range tx.vhashes {
known[vhash] = struct{}{}
}
+ hashes = append(hashes, tx.vhashes...)
}
}
- // Attempt to retrieve all test blobs
- hashes := make([]common.Hash, len(testBlobVHashes))
- for i := range testBlobVHashes {
- copy(hashes[i][:], testBlobVHashes[i][:])
+ blobs1, _, proofs1, err := pool.GetBlobs(hashes, types.BlobSidecarVersion0)
+ if err != nil {
+ t.Fatal(err)
}
- sidecars := pool.GetBlobs(hashes)
- var blobs []*kzg4844.Blob
- var proofs []*kzg4844.Proof
- for idx, sidecar := range sidecars {
- if sidecar == nil {
- blobs = append(blobs, nil)
- proofs = append(proofs, nil)
- continue
- }
- blobHashes := sidecar.BlobHashes()
- for i, hash := range blobHashes {
- if hash == hashes[idx] {
- blobs = append(blobs, &sidecar.Blobs[i])
- proofs = append(proofs, &sidecar.Proofs[i])
- }
- }
+ blobs2, _, proofs2, err := pool.GetBlobs(hashes, types.BlobSidecarVersion1)
+ if err != nil {
+ t.Fatal(err)
}
// Cross validate what we received vs what we wanted
- if len(blobs) != len(hashes) || len(proofs) != len(hashes) {
- t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), len(hashes))
+ if len(blobs1) != len(hashes) || len(proofs1) != len(hashes) {
+ t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs1), len(proofs1), len(hashes))
+ return
+ }
+ if len(blobs2) != len(hashes) || len(proofs2) != len(hashes) {
+ t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want blobs %d, want proofs: %d", len(blobs2), len(proofs2), len(hashes), len(hashes))
return
}
for i, hash := range hashes {
// If an item is missing, but shouldn't, error
- if blobs[i] == nil || proofs[i] == nil {
- if _, ok := known[hash]; ok {
- t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
- }
+ if blobs1[i] == nil || proofs1[i] == nil {
+ t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
+ continue
+ }
+ if blobs2[i] == nil || proofs2[i] == nil {
+ t.Errorf("tracked blob retrieval failed: item %d, hash %x", i, hash)
continue
}
// Item retrieved, make sure it matches the expectation
- if *blobs[i] != *testBlobs[i] || *proofs[i] != testBlobProofs[i] {
+ index := testBlobIndices[hash]
+ if *blobs1[i] != *testBlobs[index] || proofs1[i][0] != testBlobProofs[index] {
+ t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
+ continue
+ }
+ if *blobs2[i] != *testBlobs[index] || !slices.Equal(proofs2[i], testBlobCellProofs[index]) {
t.Errorf("retrieved blob or proof mismatch: item %d, hash %x", i, hash)
continue
}
@@ -970,7 +984,7 @@ func TestOpenCap(t *testing.T) {
storage := t.TempDir()
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
- store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotterEIP7594(testMaxBlobsPerBlock), nil)
// Insert a few transactions from a few accounts
var (
@@ -992,7 +1006,7 @@ func TestOpenCap(t *testing.T) {
keep = []common.Address{addr1, addr3}
drop = []common.Address{addr2}
- size = uint64(2 * (txAvgSize + blobSize))
+ size = 2 * (txAvgSize + blobSize + uint64(txBlobOverhead))
)
store.Put(blob1)
store.Put(blob2)
@@ -1001,7 +1015,7 @@ func TestOpenCap(t *testing.T) {
// Verify pool capping twice: first by reducing the data cap, then restarting
// with a high cap to ensure everything was persisted previously
- for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} {
+ for _, datacap := range []uint64{2 * (txAvgSize + blobSize + uint64(txBlobOverhead)), 1000 * (txAvgSize + blobSize + uint64(txBlobOverhead))} {
// Create a blob pool out of the pre-seeded data, but cap it to 2 blob transaction
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
@@ -1071,9 +1085,9 @@ func TestChangingSlotterSize(t *testing.T) {
addr2 = crypto.PubkeyToAddress(key2.PublicKey)
addr3 = crypto.PubkeyToAddress(key3.PublicKey)
- tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, key1)
- tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, key2)
- tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, key3)
+ tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
+ tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 0, key2, types.BlobSidecarVersion0)
+ tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, 0, key3, types.BlobSidecarVersion0)
blob1, _ = rlp.EncodeToBytes(tx1)
blob2, _ = rlp.EncodeToBytes(tx2)
@@ -1149,6 +1163,115 @@ func TestChangingSlotterSize(t *testing.T) {
}
}
+// TestBillyMigration tests the billy migration from the default slotter to
+// the PeerDAS slotter. This tests both the migration of the slotter
+// and increasing the slotter size of the new slotter.
+func TestBillyMigration(t *testing.T) {
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
+
+ // Create a temporary folder for the persistent backend
+ storage := t.TempDir()
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ os.MkdirAll(filepath.Join(storage, limboedTransactionStore), 0700)
+ // Create the billy with the old slotter
+ oldSlotter := newSlotterEIP7594(6)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, oldSlotter, nil)
+
+ // Create transactions from a few accounts.
+ var (
+ key1, _ = crypto.GenerateKey()
+ key2, _ = crypto.GenerateKey()
+ key3, _ = crypto.GenerateKey()
+
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+ addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+
+ tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
+ tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 0, key2, types.BlobSidecarVersion0)
+ tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, 0, key3, types.BlobSidecarVersion0)
+
+ blob1, _ = rlp.EncodeToBytes(tx1)
+ blob2, _ = rlp.EncodeToBytes(tx2)
+ )
+
+ // Write the two safely sized txs to store. Note: although the store is
+ // configured for a blob count of 6, it can also support around 1MB of call
+ // data - all this to say that we aren't using the absolute largest shelf
+ // available.
+ store.Put(blob1)
+ store.Put(blob2)
+ store.Close()
+
+ // Mimic a blobpool with max blob count of 6 upgrading to a max blob count of 24.
+ for _, maxBlobs := range []int{6, 24} {
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
+ statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+ statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+ statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+ statedb.Commit(0, true, false)
+
+ // Make custom chain config where the max blob count changes based on the loop variable.
+ zero := uint64(0)
+ config := &params.ChainConfig{
+ ChainID: big.NewInt(1),
+ LondonBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ CancunTime: &zero,
+ OsakaTime: &zero,
+ BlobScheduleConfig: &params.BlobScheduleConfig{
+ Cancun: &params.BlobConfig{
+ Target: maxBlobs / 2,
+ Max: maxBlobs,
+ UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
+ },
+ Osaka: &params.BlobConfig{
+ Target: maxBlobs / 2,
+ Max: maxBlobs,
+ UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
+ },
+ },
+ }
+ chain := &testBlockChain{
+ config: config,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage}, chain, nil)
+ if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+
+ // Try to add the big blob tx. In the initial iteration it should overflow
+ // the pool. On the subsequent iteration it should be accepted.
+ errs := pool.Add([]*types.Transaction{tx3}, true)
+ if _, ok := pool.index[addr3]; ok && maxBlobs == 6 {
+ t.Errorf("expected insert of oversized blob tx to fail: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
+ } else if !ok && maxBlobs == 10 {
+ t.Errorf("expected insert of oversized blob tx to succeed: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
+ }
+
+ // Verify the regular two txs are always available.
+ if got := pool.Get(tx1.Hash()); got == nil {
+ t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1)
+ }
+ if got := pool.Get(tx2.Hash()); got == nil {
+ t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2)
+ }
+
+ // Verify all the calculated pool internals. Interestingly, this is **not**
+ // a duplication of the above checks, this actually validates the verifier
+ // using the above already hard coded checks.
+ //
+ // Do not remove this, nor alter the above to be generic.
+ verifyPoolInternals(t, pool)
+
+ pool.Close()
+ }
+}
+
// TestBlobCountLimit tests the blobpool enforced limits on the max blob count.
func TestBlobCountLimit(t *testing.T) {
var (
@@ -1191,8 +1314,8 @@ func TestBlobCountLimit(t *testing.T) {
// Attempt to add transactions.
var (
- tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, key1)
- tx2 = makeMultiBlobTx(0, 1, 800, 70, 7, key2)
+ tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
+ tx2 = makeMultiBlobTx(0, 1, 800, 70, 7, 0, key2, types.BlobSidecarVersion0)
)
errs := pool.Add([]*types.Transaction{tx1, tx2}, true)
@@ -1675,6 +1798,224 @@ func TestAdd(t *testing.T) {
}
}
+// Tests that adding the transactions with legacy sidecar and expect them to
+// be converted to new format correctly.
+func TestAddLegacyBlobTx(t *testing.T) {
+ var (
+ key1, _ = crypto.GenerateKey()
+ key2, _ = crypto.GenerateKey()
+
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+ )
+
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
+ statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+ statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+ statedb.Commit(0, true, false)
+
+ chain := &testBlockChain{
+ config: params.MergedTestChainConfig,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: t.TempDir()}, chain, nil)
+ if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+
+ // Attempt to add legacy blob transactions.
+ var (
+ tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0)
+ tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion0)
+ tx3 = makeMultiBlobTx(1, 1, 800, 70, 6, 12, key2, types.BlobSidecarVersion1)
+ )
+ errs := pool.Add([]*types.Transaction{tx1, tx2, tx3}, true)
+ for _, err := range errs {
+ if err != nil {
+ t.Fatalf("failed to add tx: %v", err)
+ }
+ }
+ verifyPoolInternals(t, pool)
+ pool.Close()
+}
+
+func TestGetBlobs(t *testing.T) {
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
+
+ // Create a temporary folder for the persistent backend
+ storage := t.TempDir()
+
+ os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
+ store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(params.BlobTxMaxBlobs), nil)
+
+ // Create transactions from a few accounts.
+ var (
+ key1, _ = crypto.GenerateKey()
+ key2, _ = crypto.GenerateKey()
+ key3, _ = crypto.GenerateKey()
+
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = crypto.PubkeyToAddress(key2.PublicKey)
+ addr3 = crypto.PubkeyToAddress(key3.PublicKey)
+
+ tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, 0, key1, types.BlobSidecarVersion0) // [0, 6)
+ tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, 6, key2, types.BlobSidecarVersion1) // [6, 12)
+ tx3 = makeMultiBlobTx(0, 1, 800, 110, 6, 12, key3, types.BlobSidecarVersion0) // [12, 18)
+
+ blob1, _ = rlp.EncodeToBytes(tx1)
+ blob2, _ = rlp.EncodeToBytes(tx2)
+ blob3, _ = rlp.EncodeToBytes(tx3)
+ )
+
+ // Write the three safely sized txs to store. Note: although the store is
+ // configured for a blob count of 6, it can also support around 1MB of call
+ // data - all this to say that we aren't using the absolute largest shelf
+ // available.
+ store.Put(blob1)
+ store.Put(blob2)
+ store.Put(blob3)
+ store.Close()
+
+ // Create a blob pool out of the pre-seeded data with a fixed blob limit.
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
+ statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+ statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+ statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
+ statedb.Commit(0, true, false)
+
+ // Make a custom chain config with Cancun active and a fixed max blob count.
+ cancunTime := uint64(0)
+ config := &params.ChainConfig{
+ ChainID: big.NewInt(1),
+ LondonBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ CancunTime: &cancunTime,
+ BlobScheduleConfig: &params.BlobScheduleConfig{
+ Cancun: &params.BlobConfig{
+ Target: 12,
+ Max: 24,
+ UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
+ },
+ },
+ }
+ chain := &testBlockChain{
+ config: config,
+ basefee: uint256.NewInt(1050),
+ blobfee: uint256.NewInt(105),
+ statedb: statedb,
+ }
+ pool := New(Config{Datadir: storage}, chain, nil)
+ if err := pool.Init(1, chain.CurrentBlock(), newReserver()); err != nil {
+ t.Fatalf("failed to create blob pool: %v", err)
+ }
+
+ // Verify the regular three txs are always available.
+ if got := pool.Get(tx1.Hash()); got == nil {
+ t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1)
+ }
+ if got := pool.Get(tx2.Hash()); got == nil {
+ t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2)
+ }
+ if got := pool.Get(tx3.Hash()); got == nil {
+ t.Errorf("expected tx %s from %s in pool", tx3.Hash(), addr3)
+ }
+
+ cases := []struct {
+ start int
+ limit int
+ version byte
+ expErr bool
+ }{
+ {
+ start: 0, limit: 6,
+ version: types.BlobSidecarVersion0,
+ },
+ {
+ start: 0, limit: 6,
+ version: types.BlobSidecarVersion1,
+ },
+ {
+ start: 3, limit: 9,
+ version: types.BlobSidecarVersion0,
+ },
+ {
+ start: 3, limit: 9,
+ version: types.BlobSidecarVersion1,
+ },
+ {
+ start: 3, limit: 15,
+ version: types.BlobSidecarVersion0,
+ },
+ {
+ start: 3, limit: 15,
+ version: types.BlobSidecarVersion1,
+ },
+ {
+ start: 0, limit: 18,
+ version: types.BlobSidecarVersion0,
+ },
+ {
+ start: 0, limit: 18,
+ version: types.BlobSidecarVersion1,
+ },
+ {
+ start: 18, limit: 20,
+ version: types.BlobSidecarVersion0,
+ expErr: true,
+ },
+ }
+ for i, c := range cases {
+ var vhashes []common.Hash
+ for j := c.start; j < c.limit; j++ {
+ vhashes = append(vhashes, testBlobVHashes[j])
+ }
+ blobs, _, proofs, err := pool.GetBlobs(vhashes, c.version)
+
+ if c.expErr {
+ if err == nil {
+ t.Errorf("Unexpected return, want error for case %d", i)
+ }
+ } else {
+ if err != nil {
+ t.Errorf("Unexpected error for case %d, %v", i, err)
+ }
+ // Cross validate what we received vs what we wanted
+ length := c.limit - c.start
+ if len(blobs) != length || len(proofs) != length {
+ t.Errorf("retrieved blobs/proofs size mismatch: have %d/%d, want %d", len(blobs), len(proofs), length)
+ continue
+ }
+ for j := 0; j < len(blobs); j++ {
+ // If an item is missing, but shouldn't, error
+ if blobs[j] == nil || proofs[j] == nil {
+ t.Errorf("tracked blob retrieval failed: item %d, hash %x", j, vhashes[j])
+ continue
+ }
+ // Item retrieved, make sure the blob matches the expectation
+ if *blobs[j] != *testBlobs[c.start+j] {
+ t.Errorf("retrieved blob mismatch: item %d, hash %x", j, vhashes[j])
+ continue
+ }
+ // Item retrieved, make sure the proof matches the expectation
+ if c.version == types.BlobSidecarVersion0 {
+ if proofs[j][0] != testBlobProofs[c.start+j] {
+ t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
+ }
+ } else {
+ want, _ := kzg4844.ComputeCellProofs(blobs[j])
+ if !reflect.DeepEqual(want, proofs[j]) {
+ t.Errorf("retrieved proof mismatch: item %d, hash %x", j, vhashes[j])
+ }
+ }
+ }
+ }
+ }
+
+ pool.Close()
+}
+
// fakeBilly is a billy.Database implementation which just drops data on the floor.
type fakeBilly struct {
billy.Database
diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go
index 99d1b4ad6b..50c40c9d83 100644
--- a/core/txpool/blobpool/limbo.go
+++ b/core/txpool/blobpool/limbo.go
@@ -20,8 +20,10 @@ import (
"errors"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/billy"
)
@@ -48,11 +50,21 @@ type limbo struct {
}
// newLimbo opens and indexes a set of limboed blob transactions.
-func newLimbo(datadir string, maxBlobsPerTransaction int) (*limbo, error) {
+func newLimbo(config *params.ChainConfig, datadir string) (*limbo, error) {
l := &limbo{
index: make(map[common.Hash]uint64),
groups: make(map[uint64]map[uint64]common.Hash),
}
+
+ // Create new slotter for pre-Osaka blob configuration.
+ slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(config))
+
+ // See if we need to migrate the limbo after fusaka.
+ slotter, err := tryMigrate(config, slotter, datadir)
+ if err != nil {
+ return nil, err
+ }
+
// Index all limboed blobs on disk and delete anything unprocessable
var fails []uint64
index := func(id uint64, size uint32, data []byte) {
@@ -60,7 +72,7 @@ func newLimbo(datadir string, maxBlobsPerTransaction int) (*limbo, error) {
fails = append(fails, id)
}
}
- store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, newSlotter(maxBlobsPerTransaction), index)
+ store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, slotter, index)
if err != nil {
return nil, err
}
diff --git a/core/txpool/blobpool/slotter.go b/core/txpool/blobpool/slotter.go
index 84ccc0f27b..9b793e366c 100644
--- a/core/txpool/blobpool/slotter.go
+++ b/core/txpool/blobpool/slotter.go
@@ -16,6 +16,49 @@
package blobpool
+import (
+ "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/holiman/billy"
+)
+
+// tryMigrate checks if the billy needs to be migrated and migrates if needed.
+// Returns a slotter that can be used for the database.
+func tryMigrate(config *params.ChainConfig, slotter billy.SlotSizeFn, datadir string) (billy.SlotSizeFn, error) {
+ // Check if we need to migrate our blob db to the new slotter.
+ if config.OsakaTime != nil {
+ // Open the store using the version slotter to see if any version has been
+ // written.
+ var version int
+ index := func(_ uint64, _ uint32, blob []byte) {
+ version = max(version, parseSlotterVersion(blob))
+ }
+ store, err := billy.Open(billy.Options{Path: datadir}, newVersionSlotter(), index)
+ if err != nil {
+ return nil, err
+ }
+ store.Close()
+
+ // If the version found is less than the currently configured store version,
+ // perform a migration then write the updated version of the store.
+ if version < storeVersion {
+ newSlotter := newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config))
+ if err := billy.Migrate(billy.Options{Path: datadir, Repair: true}, slotter, newSlotter); err != nil {
+ return nil, err
+ }
+ store, err = billy.Open(billy.Options{Path: datadir}, newVersionSlotter(), nil)
+ if err != nil {
+ return nil, err
+ }
+ writeSlotterVersion(store, storeVersion)
+ store.Close()
+ }
+ // Set the slotter to the format now that the Osaka is active.
+ slotter = newSlotterEIP7594(eip4844.LatestMaxBlobsPerBlock(config))
+ }
+ return slotter, nil
+}
+
// newSlotter creates a helper method for the Billy datastore that returns the
// individual shelf sizes used to store transactions in.
//
@@ -25,7 +68,7 @@ package blobpool
// The slotter also creates a shelf for 0-blob transactions. Whilst those are not
// allowed in the current protocol, having an empty shelf is not a relevant use
// of resources, but it makes stress testing with junk transactions simpler.
-func newSlotter(maxBlobsPerTransaction int) func() (uint32, bool) {
+func newSlotter(maxBlobsPerTransaction int) billy.SlotSizeFn {
slotsize := uint32(txAvgSize)
slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return
@@ -36,3 +79,42 @@ func newSlotter(maxBlobsPerTransaction int) func() (uint32, bool) {
return slotsize, finished
}
}
+
+// newSlotterEIP7594 creates a different slotter for EIP-7594 transactions.
+// EIP-7594 (PeerDAS) changes the average transaction size which means the current
+// static 4KB average size is not enough anymore.
+// This slotter adds a dynamic overhead component to the slotter, which also
+// captures the notion that blob transactions with more blobs are also more likely
+// to have more calldata.
+func newSlotterEIP7594(maxBlobsPerTransaction int) billy.SlotSizeFn {
+ slotsize := uint32(txAvgSize)
+ slotsize -= uint32(blobSize) + txBlobOverhead // underflows, it's ok, will overflow back in the first return
+
+ return func() (size uint32, done bool) {
+ slotsize += blobSize + txBlobOverhead
+ finished := slotsize > uint32(maxBlobsPerTransaction)*(blobSize+txBlobOverhead)+txMaxSize
+
+ return slotsize, finished
+ }
+}
+
+// newVersionSlotter creates a slotter with a single 8 byte shelf to store
+// version metadata in.
+func newVersionSlotter() billy.SlotSizeFn {
+ return func() (size uint32, done bool) {
+ return 8, true
+ }
+}
+
+// parseSlotterVersion will parse the slotter's version from a given data blob.
+func parseSlotterVersion(blob []byte) int {
+ if len(blob) > 0 {
+ return int(blob[0])
+ }
+ return 0
+}
+
+// writeSlotterVersion writes the current slotter version into the store.
+func writeSlotterVersion(store billy.Database, version int) {
+ store.Put([]byte{byte(version)})
+}
diff --git a/core/txpool/blobpool/slotter_test.go b/core/txpool/blobpool/slotter_test.go
index 8d46f47d2c..e4cf232f4e 100644
--- a/core/txpool/blobpool/slotter_test.go
+++ b/core/txpool/blobpool/slotter_test.go
@@ -16,7 +16,9 @@
package blobpool
-import "testing"
+import (
+ "testing"
+)
// Tests that the slotter creates the expected database shelves.
func TestNewSlotter(t *testing.T) {
@@ -58,3 +60,44 @@ func TestNewSlotter(t *testing.T) {
}
}
}
+
+// Tests that the slotter creates the expected database shelves.
+func TestNewSlotterEIP7594(t *testing.T) {
+ // Generate the database shelve sizes
+ slotter := newSlotterEIP7594(6)
+
+ var shelves []uint32
+ for {
+ shelf, done := slotter()
+ shelves = append(shelves, shelf)
+ if done {
+ break
+ }
+ }
+ // Compare the database shelves to the expected ones
+ want := []uint32{
+ 0*blobSize + 0*txBlobOverhead + txAvgSize, // 0 blob + some expected tx infos
+ 1*blobSize + 1*txBlobOverhead + txAvgSize, // 1 blob + some expected tx infos
+ 2*blobSize + 2*txBlobOverhead + txAvgSize, // 2 blob + some expected tx infos (could be fewer blobs and more tx data)
+ 3*blobSize + 3*txBlobOverhead + txAvgSize, // 3 blob + some expected tx infos (could be fewer blobs and more tx data)
+ 4*blobSize + 4*txBlobOverhead + txAvgSize, // 4 blob + some expected tx infos (could be fewer blobs and more tx data)
+ 5*blobSize + 5*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 6*blobSize + 6*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 7*blobSize + 7*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 8*blobSize + 8*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 9*blobSize + 9*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 10*blobSize + 10*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 11*blobSize + 11*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 12*blobSize + 12*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 13*blobSize + 13*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size
+ 14*blobSize + 14*txBlobOverhead + txAvgSize, // 1-6 blobs + unexpectedly large tx infos >= 4 blobs + max tx metadata size
+ }
+ if len(shelves) != len(want) {
+ t.Errorf("shelves count mismatch: have %d, want %d", len(shelves), len(want))
+ }
+ for i := 0; i < len(shelves) && i < len(want); i++ {
+ if shelves[i] != want[i] {
+ t.Errorf("shelf %d mismatch: have %d, want %d", i, shelves[i], want[i])
+ }
+ }
+}
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
index 3f83e80b55..9202479274 100644
--- a/core/txpool/legacypool/legacypool.go
+++ b/core/txpool/legacypool/legacypool.go
@@ -580,26 +580,15 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address]
pool.mu.Lock()
defer pool.mu.Unlock()
- // Convert the new uint256.Int types to the old big.Int ones used by the legacy pool
- var (
- minTipBig *big.Int
- baseFeeBig *big.Int
- )
- if filter.MinTip != nil {
- minTipBig = filter.MinTip.ToBig()
- }
- if filter.BaseFee != nil {
- baseFeeBig = filter.BaseFee.ToBig()
- }
pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending))
for addr, list := range pool.pending {
txs := list.Flatten()
// If the miner requests tip enforcement, cap the lists now
- if minTipBig != nil || filter.GasLimitCap != 0 {
+ if filter.MinTip != nil || filter.GasLimitCap != 0 {
for i, tx := range txs {
- if minTipBig != nil {
- if tx.EffectiveGasTipIntCmp(minTipBig, baseFeeBig) < 0 {
+ if filter.MinTip != nil {
+ if tx.EffectiveGasTipIntCmp(filter.MinTip, filter.BaseFee) < 0 {
txs = txs[:i]
break
}
diff --git a/core/txpool/legacypool/list.go b/core/txpool/legacypool/list.go
index 0e47c4757a..89ffafe657 100644
--- a/core/txpool/legacypool/list.go
+++ b/core/txpool/legacypool/list.go
@@ -528,7 +528,7 @@ func (l *list) subTotalCost(txs []*types.Transaction) {
// then the heap is sorted based on the effective tip based on the given base fee.
// If baseFee is nil then the sorting is based on gasFeeCap.
type priceHeap struct {
- baseFee *big.Int // heap should always be re-sorted after baseFee is changed
+ baseFee *uint256.Int // heap should always be re-sorted after baseFee is changed
list []*types.Transaction
}
@@ -730,6 +730,10 @@ func (l *pricedList) Reheap() {
// SetBaseFee updates the base fee and triggers a re-heap. Note that Removed is not
// necessary to call right before SetBaseFee when processing a new block.
func (l *pricedList) SetBaseFee(baseFee *big.Int) {
- l.urgent.baseFee = baseFee
+ base := new(uint256.Int)
+ if baseFee != nil {
+ base.SetFromBig(baseFee)
+ }
+ l.urgent.baseFee = base
l.Reheap()
}
diff --git a/core/txpool/validation.go b/core/txpool/validation.go
index 15907e4228..d76af9cab5 100644
--- a/core/txpool/validation.go
+++ b/core/txpool/validation.go
@@ -22,7 +22,6 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -202,9 +201,8 @@ func validateBlobTx(tx *types.Transaction, head *types.Header, opts *ValidationO
if len(hashes) == 0 {
return errors.New("blobless blob transaction")
}
- maxBlobs := eip4844.MaxBlobsPerBlock(opts.Config, head.Time)
- if len(hashes) > maxBlobs {
- return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), maxBlobs)
+ if len(hashes) > params.BlobTxMaxBlobs {
+ return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.BlobTxMaxBlobs)
}
if len(sidecar.Blobs) != len(hashes) {
return fmt.Errorf("invalid number of %d blobs compared to %d blob hashes", len(sidecar.Blobs), len(hashes))
diff --git a/core/types/bal/bal_encoding.go b/core/types/bal/bal_encoding.go
index cebd2acc45..24dfafa083 100644
--- a/core/types/bal/bal_encoding.go
+++ b/core/types/bal/bal_encoding.go
@@ -108,7 +108,7 @@ type encodingSlotWrites struct {
Accesses []encodingStorageWrite `ssz-max:"300000"`
}
-// validate returns an instanceproto of the encoding-representation slot writes in
+// validate returns an instance of the encoding-representation slot writes in
// working representation.
func (e *encodingSlotWrites) validate() error {
if slices.IsSortedFunc(e.Accesses, func(a, b encodingStorageWrite) int {
@@ -207,7 +207,7 @@ func (b *ConstructionBlockAccessList) EncodeRLP(wr io.Writer) error {
var _ rlp.Encoder = &ConstructionBlockAccessList{}
-// toEncodingObj creates an instanceproto of the ConstructionAccountAccess of the type that is
+// toEncodingObj creates an instance of the ConstructionAccountAccess of the type that is
// used as input for the encoding.
func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAccess {
res := AccountAccess{
@@ -279,7 +279,7 @@ func (a *ConstructionAccountAccess) toEncodingObj(addr common.Address) AccountAc
return res
}
-// toEncodingObj returns an instanceproto of the access list expressed as the type
+// toEncodingObj returns an instance of the access list expressed as the type
// which is used as input for the encoding/decoding.
func (b *ConstructionBlockAccessList) toEncodingObj() *BlockAccessList {
var addresses []common.Address
diff --git a/core/types/block.go b/core/types/block.go
index 575ffda298..1a3f0f1773 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -96,6 +96,7 @@ type Header struct {
WithdrawalsHash *common.Hash `json:"withdrawalsRoot" rlp:"optional"`
// BlobGasUsed was added by EIP-4844 and is ignored in legacy headers.
+ // OP Stack stores the DA footprint in this field starting with the Jovian fork.
BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
// ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers.
@@ -128,7 +129,7 @@ func (h *Header) Hash() common.Hash {
return rlpHash(h)
}
-var headerSize = common.StorageSize(reflect.TypeOf(Header{}).Size())
+var headerSize = common.StorageSize(reflect.TypeFor[Header]().Size())
// Size returns the approximate memory used by all internal contents. It is used
// to approximate and limit the memory consumption of various caches.
diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go
index def81319aa..b3735d7156 100644
--- a/core/types/gen_receipt_json.go
+++ b/core/types/gen_receipt_json.go
@@ -42,6 +42,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
L1BlobBaseFeeScalar *hexutil.Uint64 `json:"l1BlobBaseFeeScalar,omitempty"`
OperatorFeeScalar *hexutil.Uint64 `json:"operatorFeeScalar,omitempty"`
OperatorFeeConstant *hexutil.Uint64 `json:"operatorFeeConstant,omitempty"`
+ DAFootprintGasScalar *hexutil.Uint64 `json:"daFootprintGasScalar,omitempty"`
}
var enc Receipt
enc.Type = hexutil.Uint64(r.Type)
@@ -68,12 +69,9 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
enc.FeeScalar = r.FeeScalar
enc.L1BaseFeeScalar = (*hexutil.Uint64)(r.L1BaseFeeScalar)
enc.L1BlobBaseFeeScalar = (*hexutil.Uint64)(r.L1BlobBaseFeeScalar)
- if r.OperatorFeeScalar != nil {
- enc.OperatorFeeScalar = (*hexutil.Uint64)(r.OperatorFeeScalar)
- }
- if r.OperatorFeeConstant != nil {
- enc.OperatorFeeConstant = (*hexutil.Uint64)(r.OperatorFeeConstant)
- }
+ enc.OperatorFeeScalar = (*hexutil.Uint64)(r.OperatorFeeScalar)
+ enc.OperatorFeeConstant = (*hexutil.Uint64)(r.OperatorFeeConstant)
+ enc.DAFootprintGasScalar = (*hexutil.Uint64)(r.DAFootprintGasScalar)
return json.Marshal(&enc)
}
@@ -106,6 +104,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
L1BlobBaseFeeScalar *hexutil.Uint64 `json:"l1BlobBaseFeeScalar,omitempty"`
OperatorFeeScalar *hexutil.Uint64 `json:"operatorFeeScalar,omitempty"`
OperatorFeeConstant *hexutil.Uint64 `json:"operatorFeeConstant,omitempty"`
+ DAFootprintGasScalar *hexutil.Uint64 `json:"daFootprintGasScalar,omitempty"`
}
var dec Receipt
if err := json.Unmarshal(input, &dec); err != nil {
@@ -194,5 +193,8 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
if dec.OperatorFeeConstant != nil {
r.OperatorFeeConstant = (*uint64)(dec.OperatorFeeConstant)
}
+ if dec.DAFootprintGasScalar != nil {
+ r.DAFootprintGasScalar = (*uint64)(dec.DAFootprintGasScalar)
+ }
return nil
}
diff --git a/core/types/hashes.go b/core/types/hashes.go
index 05cfaeed74..22f1f946dc 100644
--- a/core/types/hashes.go
+++ b/core/types/hashes.go
@@ -45,4 +45,7 @@ var (
// EmptyVerkleHash is the known hash of an empty verkle trie.
EmptyVerkleHash = common.Hash{}
+
+ // EmptyBinaryHash is the known hash of an empty binary trie.
+ EmptyBinaryHash = common.Hash{}
)
diff --git a/core/types/receipt.go b/core/types/receipt.go
index 27bbc49b95..4dfd464509 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -86,15 +86,16 @@ type Receipt struct {
TransactionIndex uint `json:"transactionIndex"`
// Optimism: extend receipts with L1 and operator fee info
- L1GasPrice *big.Int `json:"l1GasPrice,omitempty"` // Present from pre-bedrock. L1 Basefee after Bedrock
- L1BlobBaseFee *big.Int `json:"l1BlobBaseFee,omitempty"` // Always nil prior to the Ecotone hardfork
- L1GasUsed *big.Int `json:"l1GasUsed,omitempty"` // Present from pre-bedrock, deprecated as of Fjord
- L1Fee *big.Int `json:"l1Fee,omitempty"` // Present from pre-bedrock
- FeeScalar *big.Float `json:"l1FeeScalar,omitempty"` // Present from pre-bedrock to Ecotone. Nil after Ecotone
- L1BaseFeeScalar *uint64 `json:"l1BaseFeeScalar,omitempty"` // Always nil prior to the Ecotone hardfork
- L1BlobBaseFeeScalar *uint64 `json:"l1BlobBaseFeeScalar,omitempty"` // Always nil prior to the Ecotone hardfork
- OperatorFeeScalar *uint64 `json:"operatorFeeScalar,omitempty"` // Always nil prior to the Isthmus hardfork
- OperatorFeeConstant *uint64 `json:"operatorFeeConstant,omitempty"` // Always nil prior to the Isthmus hardfork
+ L1GasPrice *big.Int `json:"l1GasPrice,omitempty"` // Present from pre-bedrock. L1 Basefee after Bedrock
+ L1BlobBaseFee *big.Int `json:"l1BlobBaseFee,omitempty"` // Always nil prior to the Ecotone hardfork
+ L1GasUsed *big.Int `json:"l1GasUsed,omitempty"` // Present from pre-bedrock, deprecated as of Fjord
+ L1Fee *big.Int `json:"l1Fee,omitempty"` // Present from pre-bedrock
+ FeeScalar *big.Float `json:"l1FeeScalar,omitempty"` // Present from pre-bedrock to Ecotone. Nil after Ecotone
+ L1BaseFeeScalar *uint64 `json:"l1BaseFeeScalar,omitempty"` // Always nil prior to the Ecotone hardfork
+ L1BlobBaseFeeScalar *uint64 `json:"l1BlobBaseFeeScalar,omitempty"` // Always nil prior to the Ecotone hardfork
+ OperatorFeeScalar *uint64 `json:"operatorFeeScalar,omitempty"` // Always nil prior to the Isthmus hardfork
+ OperatorFeeConstant *uint64 `json:"operatorFeeConstant,omitempty"` // Always nil prior to the Isthmus hardfork
+ DAFootprintGasScalar *uint64 `json:"daFootprintGasScalar,omitempty"` // Always nil prior to the Jovian hardfork
}
type receiptMarshaling struct {
@@ -121,6 +122,7 @@ type receiptMarshaling struct {
DepositReceiptVersion *hexutil.Uint64
OperatorFeeScalar *hexutil.Uint64
OperatorFeeConstant *hexutil.Uint64
+ DAFootprintGasScalar *hexutil.Uint64
}
// receiptRLP is the consensus encoding of a receipt.
@@ -612,26 +614,8 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, blockHash common.Has
logIndex += uint(len(rs[i].Logs))
}
- if config.Optimism != nil && len(txs) >= 2 && config.IsBedrock(new(big.Int).SetUint64(blockNumber)) {
- gasParams, err := extractL1GasParams(config, blockTime, txs[0].Data())
- if err != nil {
- return err
- }
- for i := 0; i < len(rs); i++ {
- if txs[i].IsDepositTx() {
- continue
- }
- rs[i].L1GasPrice = gasParams.l1BaseFee
- rs[i].L1BlobBaseFee = gasParams.l1BlobBaseFee
- rs[i].L1Fee, rs[i].L1GasUsed = gasParams.costFunc(txs[i].RollupCostData())
- rs[i].FeeScalar = gasParams.feeScalar
- rs[i].L1BaseFeeScalar = u32ptrTou64ptr(gasParams.l1BaseFeeScalar)
- rs[i].L1BlobBaseFeeScalar = u32ptrTou64ptr(gasParams.l1BlobBaseFeeScalar)
- if gasParams.operatorFeeScalar != nil && gasParams.operatorFeeConstant != nil && (*gasParams.operatorFeeScalar != 0 || *gasParams.operatorFeeConstant != 0) {
- rs[i].OperatorFeeScalar = u32ptrTou64ptr(gasParams.operatorFeeScalar)
- rs[i].OperatorFeeConstant = gasParams.operatorFeeConstant
- }
- }
+ if config.IsOptimismBedrock(new(big.Int).SetUint64(blockNumber)) && len(txs) >= 2 {
+ return rs.deriveOPStackFields(config, blockTime, txs)
}
return nil
}
diff --git a/core/types/receipt_opstack.go b/core/types/receipt_opstack.go
new file mode 100644
index 0000000000..9bc69efca5
--- /dev/null
+++ b/core/types/receipt_opstack.go
@@ -0,0 +1,54 @@
+package types
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/params"
+)
+
+// deriveOPStackFields derives the OP Stack specific fields for each receipt.
+// It must only be called for blocks with at least one transaction (the L1 attributes deposit).
+func (rs Receipts) deriveOPStackFields(config *params.ChainConfig, blockTime uint64, txs []*Transaction) error {
+ // Exit early if there are only deposit transactions, for which no fields are derived.
+ if txs[len(txs)-1].IsDepositTx() {
+ return nil
+ }
+
+ l1AttributesData := txs[0].Data()
+ gasParams, err := extractL1GasParams(config, blockTime, l1AttributesData)
+ if err != nil {
+ return fmt.Errorf("failed to extract L1 gas params: %w", err)
+ }
+
+ var daFootprintGasScalar uint64
+ isJovian := config.IsJovian(blockTime)
+ if isJovian {
+ scalar, err := ExtractDAFootprintGasScalar(l1AttributesData)
+ if err != nil {
+ return fmt.Errorf("failed to extract DA footprint gas scalar: %w", err)
+ }
+ daFootprintGasScalar = uint64(scalar)
+ }
+
+ for i := range rs {
+ if txs[i].IsDepositTx() {
+ continue
+ }
+ rs[i].L1GasPrice = gasParams.l1BaseFee
+ rs[i].L1BlobBaseFee = gasParams.l1BlobBaseFee
+ rcd := txs[i].RollupCostData()
+ rs[i].L1Fee, rs[i].L1GasUsed = gasParams.costFunc(rcd)
+ rs[i].FeeScalar = gasParams.feeScalar
+ rs[i].L1BaseFeeScalar = u32ptrTou64ptr(gasParams.l1BaseFeeScalar)
+ rs[i].L1BlobBaseFeeScalar = u32ptrTou64ptr(gasParams.l1BlobBaseFeeScalar)
+ if gasParams.operatorFeeScalar != nil && gasParams.operatorFeeConstant != nil && (*gasParams.operatorFeeScalar != 0 || *gasParams.operatorFeeConstant != 0) {
+ rs[i].OperatorFeeScalar = u32ptrTou64ptr(gasParams.operatorFeeScalar)
+ rs[i].OperatorFeeConstant = gasParams.operatorFeeConstant
+ }
+ if isJovian {
+ rs[i].DAFootprintGasScalar = &daFootprintGasScalar
+ rs[i].BlobGasUsed = daFootprintGasScalar * rcd.EstimatedDASize().Uint64()
+ }
+ }
+ return nil
+}
diff --git a/core/types/receipt_opstack_test.go b/core/types/receipt_opstack_test.go
new file mode 100644
index 0000000000..c49bc7bbd4
--- /dev/null
+++ b/core/types/receipt_opstack_test.go
@@ -0,0 +1,500 @@
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/kylelemons/godebug/diff"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ bedrockGenesisTestConfig = func() *params.ChainConfig {
+ conf := *params.AllCliqueProtocolChanges // copy the config
+ conf.Clique = nil
+ conf.BedrockBlock = big.NewInt(0)
+ conf.Optimism = ¶ms.OptimismConfig{EIP1559Elasticity: 50, EIP1559Denominator: 10}
+ return &conf
+ }()
+ ecotoneTestConfig = func() *params.ChainConfig {
+ conf := *bedrockGenesisTestConfig // copy the config
+ time := uint64(0)
+ conf.EcotoneTime = &time
+ return &conf
+ }()
+ isthmusTestConfig = func() *params.ChainConfig {
+ conf := *ecotoneTestConfig // copy the config
+ time := uint64(0)
+ conf.FjordTime = &time
+ conf.GraniteTime = &time
+ conf.HoloceneTime = &time
+ conf.IsthmusTime = &time
+ return &conf
+ }()
+ jovianTestConfig = func() *params.ChainConfig {
+ conf := *isthmusTestConfig // copy the config
+ time := uint64(0)
+ conf.JovianTime = &time
+ return &conf
+ }()
+
+ depositReceiptNoNonce = &Receipt{
+ Status: ReceiptStatusFailed,
+ CumulativeGasUsed: 1,
+ Logs: []*Log{
+ {
+ Address: common.BytesToAddress([]byte{0x11}),
+ Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
+ Data: []byte{0x01, 0x00, 0xff},
+ },
+ {
+ Address: common.BytesToAddress([]byte{0x01, 0x11}),
+ Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
+ Data: []byte{0x01, 0x00, 0xff},
+ },
+ },
+ Type: DepositTxType,
+ }
+ nonce = uint64(1234)
+ depositReceiptWithNonce = &Receipt{
+ Status: ReceiptStatusFailed,
+ CumulativeGasUsed: 1,
+ DepositNonce: &nonce,
+ DepositReceiptVersion: nil,
+ Logs: []*Log{
+ {
+ Address: common.BytesToAddress([]byte{0x11}),
+ Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
+ Data: []byte{0x01, 0x00, 0xff},
+ },
+ {
+ Address: common.BytesToAddress([]byte{0x01, 0x11}),
+ Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
+ Data: []byte{0x01, 0x00, 0xff},
+ },
+ },
+ Type: DepositTxType,
+ }
+ version = CanyonDepositReceiptVersion
+ depositReceiptWithNonceAndVersion = &Receipt{
+ Status: ReceiptStatusFailed,
+ CumulativeGasUsed: 1,
+ DepositNonce: &nonce,
+ DepositReceiptVersion: &version,
+ Logs: []*Log{
+ {
+ Address: common.BytesToAddress([]byte{0x11}),
+ Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
+ Data: []byte{0x01, 0x00, 0xff},
+ },
+ {
+ Address: common.BytesToAddress([]byte{0x01, 0x11}),
+ Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
+ Data: []byte{0x01, 0x00, 0xff},
+ },
+ },
+ Type: DepositTxType,
+ }
+
+ daFootprintGasScalar = uint16(400)
+)
+
+func clearComputedFieldsOnOPStackReceipts(receipts []*Receipt) []*Receipt {
+ receipts = clearComputedFieldsOnReceipts(receipts)
+ for _, receipt := range receipts {
+ receipt.L1GasPrice = nil
+ receipt.L1BlobBaseFee = nil
+ receipt.L1GasUsed = nil
+ receipt.L1Fee = nil
+ receipt.FeeScalar = nil
+ receipt.L1BaseFeeScalar = nil
+ receipt.L1BlobBaseFeeScalar = nil
+ receipt.OperatorFeeScalar = nil
+ receipt.OperatorFeeConstant = nil
+ receipt.DAFootprintGasScalar = nil
+ }
+ return receipts
+}
+
+func getOptimismTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1GasUsed, l1Fee *big.Int, feeScalar *big.Float) ([]*Transaction, []*Receipt) {
+ // Create a few transactions to have receipts for
+ txs := Transactions{
+ NewTx(&DepositTx{
+ To: nil, // contract creation
+ Value: big.NewInt(6),
+ Gas: 50,
+ Data: l1AttributesPayload,
+ }),
+ emptyTx,
+ }
+
+ // Create the corresponding receipts
+ receipts := Receipts{
+ &Receipt{
+ Type: DepositTxType,
+ PostState: common.Hash{5}.Bytes(),
+ CumulativeGasUsed: 50 + 15,
+ Logs: []*Log{
+ {
+ Address: common.BytesToAddress([]byte{0x33}),
+ // derived fields:
+ BlockNumber: blockNumber.Uint64(),
+ TxHash: txs[0].Hash(),
+ TxIndex: 0,
+ BlockHash: blockHash,
+ Index: 0,
+ },
+ {
+ Address: common.BytesToAddress([]byte{0x03, 0x33}),
+ // derived fields:
+ BlockNumber: blockNumber.Uint64(),
+ TxHash: txs[0].Hash(),
+ TxIndex: 0,
+ BlockHash: blockHash,
+ Index: 1,
+ },
+ },
+ TxHash: txs[0].Hash(),
+ ContractAddress: common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"),
+ GasUsed: 65,
+ EffectiveGasPrice: big.NewInt(0),
+ BlockHash: blockHash,
+ BlockNumber: blockNumber,
+ TransactionIndex: 0,
+ DepositNonce: &depNonce1,
+ },
+ &Receipt{
+ Type: LegacyTxType,
+ EffectiveGasPrice: big.NewInt(0),
+ PostState: common.Hash{4}.Bytes(),
+ CumulativeGasUsed: 10,
+ Logs: []*Log{},
+ // derived fields:
+ TxHash: txs[1].Hash(),
+ GasUsed: 18446744073709551561,
+ BlockHash: blockHash,
+ BlockNumber: blockNumber,
+ TransactionIndex: 1,
+ L1GasPrice: l1GasPrice,
+ L1GasUsed: l1GasUsed,
+ L1Fee: l1Fee,
+ FeeScalar: feeScalar,
+ },
+ }
+ for _, receipt := range receipts {
+ receipt.Bloom = CreateBloom(receipt)
+ }
+
+ return txs, receipts
+}
+
+func getOptimismEcotoneTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobBaseFee, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar *uint64) ([]*Transaction, []*Receipt) {
+ txs, receipts := getOptimismTxReceipts(l1AttributesPayload, l1GasPrice, l1GasUsed, l1Fee, nil)
+ receipts[1].L1BlobBaseFee = l1BlobBaseFee
+ receipts[1].L1BaseFeeScalar = baseFeeScalar
+ receipts[1].L1BlobBaseFeeScalar = blobBaseFeeScalar
+ return txs, receipts
+}
+
+func getOptimismIsthmusTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobBaseFee, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar, operatorFeeScalar, operatorFeeConstant *uint64) ([]*Transaction, []*Receipt) {
+ txs, receipts := getOptimismEcotoneTxReceipts(l1AttributesPayload, l1GasPrice, l1BlobBaseFee, l1GasUsed, l1Fee, baseFeeScalar, blobBaseFeeScalar)
+ receipts[1].OperatorFeeScalar = operatorFeeScalar
+ receipts[1].OperatorFeeConstant = operatorFeeConstant
+ return txs, receipts
+}
+
+func getOptimismJovianTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobBaseFee, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar, operatorFeeScalar, operatorFeeConstant, daFootprintGasScalar *uint64) ([]*Transaction, []*Receipt) {
+ txs, receipts := getOptimismIsthmusTxReceipts(l1AttributesPayload, l1GasPrice, l1BlobBaseFee, l1GasUsed, l1Fee, baseFeeScalar, blobBaseFeeScalar, operatorFeeScalar, operatorFeeConstant)
+ receipts[1].DAFootprintGasScalar = daFootprintGasScalar
+ if daFootprintGasScalar != nil {
+ receipts[1].BlobGasUsed = *daFootprintGasScalar * txs[1].RollupCostData().EstimatedDASize().Uint64()
+ }
+ return txs, receipts
+}
+
+func TestDeriveOptimismBedrockTxReceipts(t *testing.T) {
+ // Bedrock style l1 attributes with L1Scalar=7_000_000 (becomes 7 after division), L1Overhead=50, L1BaseFee=1000*1e6
+ payload := common.Hex2Bytes("015d8eb900000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000003b9aca0000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000006acfc0015d8eb900000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000003b9aca0000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000006acfc0")
+ // the parameters we use below are defined in rollup_test.go
+ l1GasPrice := baseFee
+ l1GasUsed := bedrockGas
+ feeScalar := big.NewFloat(float64(scalar.Uint64() / 1e6))
+ l1Fee := bedrockFee
+ txs, receipts := getOptimismTxReceipts(payload, l1GasPrice, l1GasUsed, l1Fee, feeScalar)
+
+ // Re-derive receipts.
+ baseFee := big.NewInt(1000)
+ derivedReceipts := clearComputedFieldsOnOPStackReceipts(receipts)
+ err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
+ require.NoError(t, err)
+ checkBedrockReceipts(t, receipts, derivedReceipts)
+
+ // Should get same result with the Ecotone config because it will assume this is "first ecotone block"
+ // if it sees the bedrock style L1 attributes.
+ err = Receipts(derivedReceipts).DeriveFields(ecotoneTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
+ require.NoError(t, err)
+ checkBedrockReceipts(t, receipts, derivedReceipts)
+}
+
+func TestDeriveOptimismEcotoneTxReceipts(t *testing.T) {
+ // Ecotone style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6
+ payload := common.Hex2Bytes("440a5e20000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2")
+ // the parameters we use below are defined in rollup_test.go
+ baseFeeScalarUint64 := baseFeeScalar.Uint64()
+ blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64()
+ txs, receipts := getOptimismEcotoneTxReceipts(payload, baseFee, blobBaseFee, ecotoneGas, ecotoneFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64)
+
+ // Re-derive receipts.
+ baseFee := big.NewInt(1000)
+ derivedReceipts := clearComputedFieldsOnOPStackReceipts(receipts)
+ // Should error out if we try to process this with a pre-Ecotone config
+ err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
+ require.Error(t, err)
+
+ err = Receipts(derivedReceipts).DeriveFields(ecotoneTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
+ require.NoError(t, err)
+ diffReceipts(t, receipts, derivedReceipts)
+}
+
+func TestDeriveOptimismIsthmusTxReceipts(t *testing.T) {
+ // Isthmus style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6, operatorFeeScalar=1439103868, operatorFeeConstant=1256417826609331460
+ payload := common.Hex2Bytes("098999be000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d255c6fb7c116fb15b44847d04")
+ // the parameters we use below are defined in rollup_test.go
+ baseFeeScalarUint64 := baseFeeScalar.Uint64()
+ blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64()
+ operatorFeeScalarUint64 := operatorFeeScalar.Uint64()
+ operatorFeeConstantUint64 := operatorFeeConstant.Uint64()
+ txs, receipts := getOptimismIsthmusTxReceipts(payload, baseFee, blobBaseFee, minimumFjordGas, fjordFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64, &operatorFeeScalarUint64, &operatorFeeConstantUint64)
+
+ // Re-derive receipts.
+ baseFee := big.NewInt(1000)
+ derivedReceipts := clearComputedFieldsOnOPStackReceipts(receipts)
+ // Should error out if we try to process this with a pre-Isthmus config
+ err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
+ require.Error(t, err)
+
+ err = Receipts(derivedReceipts).DeriveFields(isthmusTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
+ require.NoError(t, err)
+ diffReceipts(t, receipts, derivedReceipts)
+}
+
+func TestDeriveOptimismIsthmusTxReceiptsNoOperatorFee(t *testing.T) {
+ // Isthmus style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6, operatorFeeScalar=0, operatorFeeConstant=0
+ payload := common.Hex2Bytes("098999be000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000")
+ // the parameters we use below are defined in rollup_test.go
+ baseFeeScalarUint64 := baseFeeScalar.Uint64()
+ blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64()
+ txs, receipts := getOptimismIsthmusTxReceipts(payload, baseFee, blobBaseFee, minimumFjordGas, fjordFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64, nil, nil)
+
+ // Re-derive receipts.
+ baseFee := big.NewInt(1000)
+ derivedReceipts := clearComputedFieldsOnOPStackReceipts(receipts)
+ // Should error out if we try to process this with a pre-Isthmus config
+ err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
+ require.Error(t, err)
+
+ err = Receipts(derivedReceipts).DeriveFields(isthmusTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
+ require.NoError(t, err)
+ diffReceipts(t, receipts, derivedReceipts)
+}
+
+func TestDeriveOptimismJovianTxReceipts(t *testing.T) {
+ // Jovian style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6, operatorFeeScalar=1439103868, operatorFeeConstant=1256417826609331460, daFootprintGasScalar=400
+ payload := common.Hex2Bytes("3db6be2b000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d255c6fb7c116fb15b44847d040190")
+ // the parameters we use below are defined in rollup_test.go
+ baseFeeScalarUint64 := baseFeeScalar.Uint64()
+ blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64()
+ operatorFeeScalarUint64 := operatorFeeScalar.Uint64()
+ operatorFeeConstantUint64 := operatorFeeConstant.Uint64()
+ daFootprintGasScalarUint64 := uint64(daFootprintGasScalar)
+ txs, receipts := getOptimismJovianTxReceipts(payload, baseFee, blobBaseFee, minimumFjordGas, fjordFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64, &operatorFeeScalarUint64, &operatorFeeConstantUint64, &daFootprintGasScalarUint64)
+
+ // Re-derive receipts.
+ baseFee := big.NewInt(1000)
+ derivedReceipts := clearComputedFieldsOnOPStackReceipts(receipts)
+ // Should error out if we try to process this with a pre-Jovian config
+ err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
+ require.Error(t, err)
+
+ err = Receipts(derivedReceipts).DeriveFields(jovianTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
+ require.NoError(t, err)
+ diffReceipts(t, receipts, derivedReceipts)
+}
+
+func diffReceipts(t *testing.T, receipts, derivedReceipts []*Receipt) {
+ // Check diff of receipts against derivedReceipts.
+ r1, err := json.MarshalIndent(receipts, "", " ")
+ if err != nil {
+ t.Fatal("error marshaling input receipts:", err)
+ }
+ r2, err := json.MarshalIndent(derivedReceipts, "", " ")
+ if err != nil {
+ t.Fatal("error marshaling derived receipts:", err)
+ }
+ d := diff.Diff(string(r1), string(r2))
+ if d != "" {
+ t.Fatal("receipts differ:", d)
+ }
+}
+
+func checkBedrockReceipts(t *testing.T, receipts, derivedReceipts []*Receipt) {
+ diffReceipts(t, receipts, derivedReceipts)
+
+ // Check that we preserved the invariant: l1Fee = l1GasPrice * l1GasUsed * l1FeeScalar
+ // but with more difficult int math...
+ l2Rcpt := derivedReceipts[1]
+ l1GasCost := new(big.Int).Mul(l2Rcpt.L1GasPrice, l2Rcpt.L1GasUsed)
+ l1Fee := new(big.Float).Mul(new(big.Float).SetInt(l1GasCost), l2Rcpt.FeeScalar)
+ require.Equal(t, new(big.Float).SetInt(l2Rcpt.L1Fee), l1Fee)
+}
+
+func TestBedrockDepositReceiptUnchanged(t *testing.T) {
+ expectedRlp := common.FromHex("7EF90156A003000000000000000000000000000000000000000000000000000000000000000AB9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000F0D7940000000000000000000000000000000000000033C001D7940000000000000000000000000000000000000333C002")
+ // Deposit receipt with no nonce
+ receipt := &Receipt{
+ Type: DepositTxType,
+ PostState: common.Hash{3}.Bytes(),
+ CumulativeGasUsed: 10,
+ Logs: []*Log{
+ {Address: common.BytesToAddress([]byte{0x33}), Data: []byte{1}, Topics: []common.Hash{}},
+ {Address: common.BytesToAddress([]byte{0x03, 0x33}), Data: []byte{2}, Topics: []common.Hash{}},
+ },
+ TxHash: common.Hash{},
+ ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}),
+ GasUsed: 4,
+ }
+
+ rlp, err := receipt.MarshalBinary()
+ require.NoError(t, err)
+ require.Equal(t, expectedRlp, rlp)
+
+ // Consensus values should be unchanged after reparsing
+ parsed := new(Receipt)
+ err = parsed.UnmarshalBinary(rlp)
+ require.NoError(t, err)
+ require.Equal(t, receipt.Status, parsed.Status)
+ require.Equal(t, receipt.CumulativeGasUsed, parsed.CumulativeGasUsed)
+ require.Equal(t, receipt.Bloom, parsed.Bloom)
+ require.EqualValues(t, receipt.Logs, parsed.Logs)
+ // And still shouldn't have a nonce
+ require.Nil(t, parsed.DepositNonce)
+ // ..or a deposit nonce
+ require.Nil(t, parsed.DepositReceiptVersion)
+}
+
+// Regolith introduced an inconsistency in behavior between EncodeIndex and MarshalBinary for a
+// deposit transaction receipt. TestReceiptEncodeIndexBugIsEnshrined makes sure this difference is
+// preserved for backwards compatibility purposes, but also that there is no discrepancy for the
+// post-Canyon encoding.
+func TestReceiptEncodeIndexBugIsEnshrined(t *testing.T) {
+ // Check that a post-Regolith, pre-Canyon receipt produces the expected difference between
+ // EncodeIndex and MarshalBinary.
+ buf := new(bytes.Buffer)
+ receipts := Receipts{depositReceiptWithNonce}
+ receipts.EncodeIndex(0, buf)
+ indexBytes := buf.Bytes()
+
+ regularBytes, _ := receipts[0].MarshalBinary()
+
+ require.NotEqual(t, indexBytes, regularBytes)
+
+ // Confirm the buggy encoding is as expected, which means it should encode as if it had no
+ // nonce specified (like that of a non-deposit receipt, whose encoding would differ only in the
+ // type byte).
+ buf.Reset()
+ tempReceipt := *depositReceiptWithNonce
+ tempReceipt.Type = eip1559Receipt.Type
+ buggyBytes, _ := tempReceipt.MarshalBinary()
+
+ require.Equal(t, indexBytes[1:], buggyBytes[1:])
+
+ // check that the post-Canyon encoding has no differences between EncodeIndex and
+ // MarshalBinary.
+ buf.Reset()
+ receipts = Receipts{depositReceiptWithNonceAndVersion}
+ receipts.EncodeIndex(0, buf)
+ indexBytes = buf.Bytes()
+
+ regularBytes, _ = receipts[0].MarshalBinary()
+
+ require.Equal(t, indexBytes, regularBytes)
+
+ // Check that bumping the nonce post-canyon changes the hash
+ bumpedReceipt := *depositReceiptWithNonceAndVersion
+ bumpedNonce := nonce + 1
+ bumpedReceipt.DepositNonce = &bumpedNonce
+ bumpedBytes, _ := bumpedReceipt.MarshalBinary()
+ require.NotEqual(t, regularBytes, bumpedBytes)
+}
+
+func TestRoundTripReceipt(t *testing.T) {
+ tests := []struct {
+ name string
+ rcpt *Receipt
+ }{
+ {name: "Legacy", rcpt: legacyReceipt},
+ {name: "AccessList", rcpt: accessListReceipt},
+ {name: "EIP1559", rcpt: eip1559Receipt},
+ {name: "DepositNoNonce", rcpt: depositReceiptNoNonce},
+ {name: "DepositWithNonce", rcpt: depositReceiptWithNonce},
+ {name: "DepositWithNonceAndVersion", rcpt: depositReceiptWithNonceAndVersion},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ data, err := test.rcpt.MarshalBinary()
+ require.NoError(t, err)
+
+ d := &Receipt{}
+ err = d.UnmarshalBinary(data)
+ require.NoError(t, err)
+ require.Equal(t, test.rcpt, d)
+ require.Equal(t, test.rcpt.DepositNonce, d.DepositNonce)
+ require.Equal(t, test.rcpt.DepositReceiptVersion, d.DepositReceiptVersion)
+ })
+
+ t.Run(fmt.Sprintf("%sRejectExtraData", test.name), func(t *testing.T) {
+ data, err := test.rcpt.MarshalBinary()
+ require.NoError(t, err)
+ data = append(data, 1, 2, 3, 4)
+ d := &Receipt{}
+ err = d.UnmarshalBinary(data)
+ require.Error(t, err)
+ })
+ }
+}
+
+func TestRoundTripReceiptForStorage(t *testing.T) {
+ tests := []struct {
+ name string
+ rcpt *Receipt
+ }{
+ {name: "Legacy", rcpt: legacyReceipt},
+ {name: "AccessList", rcpt: accessListReceipt},
+ {name: "EIP1559", rcpt: eip1559Receipt},
+ {name: "DepositNoNonce", rcpt: depositReceiptNoNonce},
+ {name: "DepositWithNonce", rcpt: depositReceiptWithNonce},
+ {name: "DepositWithNonceAndVersion", rcpt: depositReceiptWithNonceAndVersion},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ data, err := rlp.EncodeToBytes((*ReceiptForStorage)(test.rcpt))
+ require.NoError(t, err)
+
+ d := &ReceiptForStorage{}
+ err = rlp.DecodeBytes(data, d)
+ require.NoError(t, err)
+ // Only check the stored fields - the others are derived later
+ require.Equal(t, test.rcpt.Status, d.Status)
+ require.Equal(t, test.rcpt.CumulativeGasUsed, d.CumulativeGasUsed)
+ require.Equal(t, test.rcpt.Logs, d.Logs)
+ require.Equal(t, test.rcpt.DepositNonce, d.DepositNonce)
+ require.Equal(t, test.rcpt.DepositReceiptVersion, d.DepositReceiptVersion)
+ })
+ }
+}
diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go
index 245de4af8c..9d513e0039 100644
--- a/core/types/receipt_test.go
+++ b/core/types/receipt_test.go
@@ -19,7 +19,6 @@ package types
import (
"bytes"
"encoding/json"
- "fmt"
"math"
"math/big"
"reflect"
@@ -31,30 +30,9 @@ import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/uint256"
"github.com/kylelemons/godebug/diff"
- "github.com/stretchr/testify/require"
)
var (
- bedrockGenesisTestConfig = func() *params.ChainConfig {
- conf := *params.AllCliqueProtocolChanges // copy the config
- conf.Clique = nil
- conf.BedrockBlock = big.NewInt(0)
- conf.Optimism = ¶ms.OptimismConfig{EIP1559Elasticity: 50, EIP1559Denominator: 10}
- return &conf
- }()
- ecotoneTestConfig = func() *params.ChainConfig {
- conf := *bedrockGenesisTestConfig // copy the config
- time := uint64(0)
- conf.EcotoneTime = &time
- return &conf
- }()
- isthmusTestConfig = func() *params.ChainConfig {
- conf := *bedrockGenesisTestConfig // copy the config
- time := uint64(0)
- conf.IsthmusTime = &time
- return &conf
- }()
-
legacyReceipt = &Receipt{
Status: ReceiptStatusFailed,
CumulativeGasUsed: 1,
@@ -105,63 +83,6 @@ var (
},
Type: DynamicFeeTxType,
}
- depositReceiptNoNonce = &Receipt{
- Status: ReceiptStatusFailed,
- CumulativeGasUsed: 1,
- Logs: []*Log{
- {
- Address: common.BytesToAddress([]byte{0x11}),
- Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
- Data: []byte{0x01, 0x00, 0xff},
- },
- {
- Address: common.BytesToAddress([]byte{0x01, 0x11}),
- Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
- Data: []byte{0x01, 0x00, 0xff},
- },
- },
- Type: DepositTxType,
- }
- nonce = uint64(1234)
- depositReceiptWithNonce = &Receipt{
- Status: ReceiptStatusFailed,
- CumulativeGasUsed: 1,
- DepositNonce: &nonce,
- DepositReceiptVersion: nil,
- Logs: []*Log{
- {
- Address: common.BytesToAddress([]byte{0x11}),
- Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
- Data: []byte{0x01, 0x00, 0xff},
- },
- {
- Address: common.BytesToAddress([]byte{0x01, 0x11}),
- Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
- Data: []byte{0x01, 0x00, 0xff},
- },
- },
- Type: DepositTxType,
- }
- version = CanyonDepositReceiptVersion
- depositReceiptWithNonceAndVersion = &Receipt{
- Status: ReceiptStatusFailed,
- CumulativeGasUsed: 1,
- DepositNonce: &nonce,
- DepositReceiptVersion: &version,
- Logs: []*Log{
- {
- Address: common.BytesToAddress([]byte{0x11}),
- Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
- Data: []byte{0x01, 0x00, 0xff},
- },
- {
- Address: common.BytesToAddress([]byte{0x01, 0x11}),
- Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")},
- Data: []byte{0x01, 0x00, 0xff},
- },
- },
- Type: DepositTxType,
- }
// Create a few transactions to have receipts for
to2 = common.HexToAddress("0x2")
@@ -240,11 +161,13 @@ var (
Gas: 60,
}),
}
+
+ blockNumber = big.NewInt(1)
+ blockTime = uint64(2)
+ blockHash = common.BytesToHash([]byte{0x03, 0x14})
+
depNonce1 = uint64(7)
depNonce2 = uint64(8)
- blockNumber = big.NewInt(1)
- blockTime = uint64(2)
- blockHash = common.BytesToHash([]byte{0x03, 0x14})
canyonDepositReceiptVersion = CanyonDepositReceiptVersion
)
@@ -493,11 +416,11 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) {
// Tests that receipt data can be correctly derived from the contextual infos
func TestDeriveFields(t *testing.T) {
// Re-derive receipts.
- baseFee := big.NewInt(1000)
+ basefee := big.NewInt(1000)
blobGasPrice := big.NewInt(920)
receipts := getTestReceipts()
derivedReceipts := clearComputedFieldsOnReceipts(receipts)
- err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, baseFee, blobGasPrice, txs)
+ err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, blobGasPrice, txs)
if err != nil {
t.Fatalf("DeriveFields(...) = %v, want ", err)
}
@@ -728,499 +651,3 @@ func clearComputedFieldsOnLogs(logs []*Log) []*Log {
}
return l
}
-
-func getOptimismEcotoneTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobGasPrice, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar *uint64) ([]*Transaction, []*Receipt) {
- // Create a few transactions to have receipts for
- txs := Transactions{
- NewTx(&DepositTx{
- To: nil, // contract creation
- Value: big.NewInt(6),
- Gas: 50,
- Data: l1AttributesPayload,
- }),
- emptyTx,
- }
-
- // Create the corresponding receipts
- receipts := Receipts{
- &Receipt{
- Type: DepositTxType,
- PostState: common.Hash{5}.Bytes(),
- CumulativeGasUsed: 50 + 15,
- Logs: []*Log{
- {
- Address: common.BytesToAddress([]byte{0x33}),
- // derived fields:
- BlockNumber: blockNumber.Uint64(),
- TxHash: txs[0].Hash(),
- TxIndex: 0,
- BlockHash: blockHash,
- Index: 0,
- },
- {
- Address: common.BytesToAddress([]byte{0x03, 0x33}),
- // derived fields:
- BlockNumber: blockNumber.Uint64(),
- TxHash: txs[0].Hash(),
- TxIndex: 0,
- BlockHash: blockHash,
- Index: 1,
- },
- },
- TxHash: txs[0].Hash(),
- ContractAddress: common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"),
- GasUsed: 65,
- EffectiveGasPrice: big.NewInt(0),
- BlockHash: blockHash,
- BlockNumber: blockNumber,
- TransactionIndex: 0,
- DepositNonce: &depNonce1,
- },
- &Receipt{
- Type: LegacyTxType,
- EffectiveGasPrice: big.NewInt(0),
- PostState: common.Hash{4}.Bytes(),
- CumulativeGasUsed: 10,
- Logs: []*Log{},
- // derived fields:
- TxHash: txs[1].Hash(),
- GasUsed: 18446744073709551561,
- BlockHash: blockHash,
- BlockNumber: blockNumber,
- TransactionIndex: 1,
- L1GasPrice: l1GasPrice,
- L1BlobBaseFee: l1BlobGasPrice,
- L1GasUsed: l1GasUsed,
- L1Fee: l1Fee,
- L1BaseFeeScalar: baseFeeScalar,
- L1BlobBaseFeeScalar: blobBaseFeeScalar,
- },
- }
- for _, receipt := range receipts {
- receipt.Bloom = CreateBloom(receipt)
- }
-
- return txs, receipts
-}
-
-func getOptimismIsthmusTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1BlobGasPrice, l1GasUsed, l1Fee *big.Int, baseFeeScalar, blobBaseFeeScalar, operatorFeeScalar, operatorFeeConstant *uint64) ([]*Transaction, []*Receipt) {
- // Create a few transactions to have receipts for
- txs := Transactions{
- NewTx(&DepositTx{
- To: nil, // contract creation
- Value: big.NewInt(6),
- Gas: 50,
- Data: l1AttributesPayload,
- }),
- emptyTx,
- }
-
- // Create the corresponding receipts
- receipts := Receipts{
- &Receipt{
- Type: DepositTxType,
- PostState: common.Hash{5}.Bytes(),
- CumulativeGasUsed: 50 + 15,
- Logs: []*Log{
- {
- Address: common.BytesToAddress([]byte{0x33}),
- // derived fields:
- BlockNumber: blockNumber.Uint64(),
- TxHash: txs[0].Hash(),
- TxIndex: 0,
- BlockHash: blockHash,
- Index: 0,
- },
- {
- Address: common.BytesToAddress([]byte{0x03, 0x33}),
- // derived fields:
- BlockNumber: blockNumber.Uint64(),
- TxHash: txs[0].Hash(),
- TxIndex: 0,
- BlockHash: blockHash,
- Index: 1,
- },
- },
- TxHash: txs[0].Hash(),
- ContractAddress: common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"),
- GasUsed: 65,
- EffectiveGasPrice: big.NewInt(0),
- BlockHash: blockHash,
- BlockNumber: blockNumber,
- TransactionIndex: 0,
- DepositNonce: &depNonce1,
- },
- &Receipt{
- Type: LegacyTxType,
- EffectiveGasPrice: big.NewInt(0),
- PostState: common.Hash{4}.Bytes(),
- CumulativeGasUsed: 10,
- Logs: []*Log{},
- // derived fields:
- TxHash: txs[1].Hash(),
- GasUsed: 18446744073709551561,
- BlockHash: blockHash,
- BlockNumber: blockNumber,
- TransactionIndex: 1,
- L1GasPrice: l1GasPrice,
- L1BlobBaseFee: l1BlobGasPrice,
- L1GasUsed: l1GasUsed,
- L1Fee: l1Fee,
- L1BaseFeeScalar: baseFeeScalar,
- L1BlobBaseFeeScalar: blobBaseFeeScalar,
- OperatorFeeScalar: operatorFeeScalar,
- OperatorFeeConstant: operatorFeeConstant,
- },
- }
- for _, receipt := range receipts {
- receipt.Bloom = CreateBloom(receipt)
- }
-
- return txs, receipts
-}
-
-func getOptimismTxReceipts(l1AttributesPayload []byte, l1GasPrice, l1GasUsed, l1Fee *big.Int, feeScalar *big.Float) ([]*Transaction, []*Receipt) {
- // Create a few transactions to have receipts for
- txs := Transactions{
- NewTx(&DepositTx{
- To: nil, // contract creation
- Value: big.NewInt(6),
- Gas: 50,
- Data: l1AttributesPayload,
- }),
- emptyTx,
- }
-
- // Create the corresponding receipts
- receipts := Receipts{
- &Receipt{
- Type: DepositTxType,
- PostState: common.Hash{5}.Bytes(),
- CumulativeGasUsed: 50 + 15,
- Logs: []*Log{
- {
- Address: common.BytesToAddress([]byte{0x33}),
- // derived fields:
- BlockNumber: blockNumber.Uint64(),
- TxHash: txs[0].Hash(),
- TxIndex: 0,
- BlockHash: blockHash,
- Index: 0,
- },
- {
- Address: common.BytesToAddress([]byte{0x03, 0x33}),
- // derived fields:
- BlockNumber: blockNumber.Uint64(),
- TxHash: txs[0].Hash(),
- TxIndex: 0,
- BlockHash: blockHash,
- Index: 1,
- },
- },
- TxHash: txs[0].Hash(),
- ContractAddress: common.HexToAddress("0x3bb898b4bbe24f68a4e9be46cfe72d1787fd74f4"),
- GasUsed: 65,
- EffectiveGasPrice: big.NewInt(0),
- BlockHash: blockHash,
- BlockNumber: blockNumber,
- TransactionIndex: 0,
- DepositNonce: &depNonce1,
- },
- &Receipt{
- Type: LegacyTxType,
- EffectiveGasPrice: big.NewInt(0),
- PostState: common.Hash{4}.Bytes(),
- CumulativeGasUsed: 10,
- Logs: []*Log{},
- // derived fields:
- TxHash: txs[1].Hash(),
- GasUsed: 18446744073709551561,
- BlockHash: blockHash,
- BlockNumber: blockNumber,
- TransactionIndex: 1,
- L1GasPrice: l1GasPrice,
- L1GasUsed: l1GasUsed,
- L1Fee: l1Fee,
- FeeScalar: feeScalar,
- },
- }
- for _, receipt := range receipts {
- receipt.Bloom = CreateBloom(receipt)
- }
-
- return txs, receipts
-}
-
-func TestDeriveOptimismBedrockTxReceipts(t *testing.T) {
- // Bedrock style l1 attributes with L1Scalar=7_000_000 (becomes 7 after division), L1Overhead=50, L1BaseFee=1000*1e6
- payload := common.Hex2Bytes("015d8eb900000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000003b9aca0000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000006acfc0015d8eb900000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000003b9aca0000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000006acfc0")
- // the parameters we use below are defined in rollup_test.go
- l1GasPrice := baseFee
- l1GasUsed := bedrockGas
- feeScalar := big.NewFloat(float64(scalar.Uint64() / 1e6))
- l1Fee := bedrockFee
- txs, receipts := getOptimismTxReceipts(payload, l1GasPrice, l1GasUsed, l1Fee, feeScalar)
-
- // Re-derive receipts.
- baseFee := big.NewInt(1000)
- derivedReceipts := clearComputedFieldsOnReceipts(receipts)
- err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
- if err != nil {
- t.Fatalf("DeriveFields(...) = %v, want ", err)
- }
- checkBedrockReceipts(t, receipts, derivedReceipts)
-
- // Should get same result with the Ecotone config because it will assume this is "first ecotone block"
- // if it sees the bedrock style L1 attributes.
- err = Receipts(derivedReceipts).DeriveFields(ecotoneTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
- if err != nil {
- t.Fatalf("DeriveFields(...) = %v, want ", err)
- }
- checkBedrockReceipts(t, receipts, derivedReceipts)
-}
-
-func TestDeriveOptimismEcotoneTxReceipts(t *testing.T) {
- // Ecotone style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6
- payload := common.Hex2Bytes("440a5e20000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2")
- // the parameters we use below are defined in rollup_test.go
- baseFeeScalarUint64 := baseFeeScalar.Uint64()
- blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64()
- txs, receipts := getOptimismEcotoneTxReceipts(payload, baseFee, blobBaseFee, ecotoneGas, ecotoneFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64)
-
- // Re-derive receipts.
- baseFee := big.NewInt(1000)
- derivedReceipts := clearComputedFieldsOnReceipts(receipts)
- // Should error out if we try to process this with a pre-Ecotone config
- err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
- if err == nil {
- t.Fatalf("expected error from deriving ecotone receipts with pre-ecotone config, got none")
- }
-
- err = Receipts(derivedReceipts).DeriveFields(ecotoneTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
- if err != nil {
- t.Fatalf("DeriveFields(...) = %v, want ", err)
- }
- diffReceipts(t, receipts, derivedReceipts)
-}
-
-func TestDeriveOptimismIsthmusTxReceipts(t *testing.T) {
- // Isthmus style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6, operatorFeeScalar=7, operatorFeeConstant=9
- payload := common.Hex2Bytes("098999be000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d255c6fb7c116fb15b44847d04")
- // the parameters we use below are defined in rollup_test.go
- baseFeeScalarUint64 := baseFeeScalar.Uint64()
- blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64()
- operatorFeeScalarUint64 := operatorFeeScalar.Uint64()
- operatorFeeConstantUint64 := operatorFeeConstant.Uint64()
- txs, receipts := getOptimismIsthmusTxReceipts(payload, baseFee, blobBaseFee, minimumFjordGas, fjordFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64, &operatorFeeScalarUint64, &operatorFeeConstantUint64)
-
- // Re-derive receipts.
- baseFee := big.NewInt(1000)
- derivedReceipts := clearComputedFieldsOnReceipts(receipts)
- // Should error out if we try to process this with a pre-Isthmus config
- err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
- if err == nil {
- t.Fatalf("expected error from deriving isthmus receipts with pre-isthmus config, got none")
- }
-
- err = Receipts(derivedReceipts).DeriveFields(isthmusTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
- if err != nil {
- t.Fatalf("DeriveFields(...) = %v, want ", err)
- }
- diffReceipts(t, receipts, derivedReceipts)
-}
-
-func TestDeriveOptimismIsthmusTxReceiptsNoOperatorFee(t *testing.T) {
- // Isthmus style l1 attributes with baseFeeScalar=2, blobBaseFeeScalar=3, baseFee=1000*1e6, blobBaseFee=10*1e6, operatorFeeScalar=7, operatorFeeConstant=9
- payload := common.Hex2Bytes("098999be000000020000000300000000000004d200000000000004d200000000000004d2000000000000000000000000000000000000000000000000000000003b9aca00000000000000000000000000000000000000000000000000000000000098968000000000000000000000000000000000000000000000000000000000000004d200000000000000000000000000000000000000000000000000000000000004d2000000000000000000000000")
- // the parameters we use below are defined in rollup_test.go
- baseFeeScalarUint64 := baseFeeScalar.Uint64()
- blobBaseFeeScalarUint64 := blobBaseFeeScalar.Uint64()
- txs, receipts := getOptimismIsthmusTxReceipts(payload, baseFee, blobBaseFee, minimumFjordGas, fjordFee, &baseFeeScalarUint64, &blobBaseFeeScalarUint64, nil, nil)
-
- // Re-derive receipts.
- baseFee := big.NewInt(1000)
- derivedReceipts := clearComputedFieldsOnReceipts(receipts)
- // Should error out if we try to process this with a pre-Isthmus config
- err := Receipts(derivedReceipts).DeriveFields(bedrockGenesisTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
- if err == nil {
- t.Fatalf("expected error from deriving isthmus receipts with pre-isthmus config, got none")
- }
-
- err = Receipts(derivedReceipts).DeriveFields(isthmusTestConfig, blockHash, blockNumber.Uint64(), 0, baseFee, nil, txs)
- if err != nil {
- t.Fatalf("DeriveFields(...) = %v, want ", err)
- }
- diffReceipts(t, receipts, derivedReceipts)
-}
-
-func diffReceipts(t *testing.T, receipts, derivedReceipts []*Receipt) {
- // Check diff of receipts against derivedReceipts.
- r1, err := json.MarshalIndent(receipts, "", " ")
- if err != nil {
- t.Fatal("error marshaling input receipts:", err)
- }
- r2, err := json.MarshalIndent(derivedReceipts, "", " ")
- if err != nil {
- t.Fatal("error marshaling derived receipts:", err)
- }
- d := diff.Diff(string(r1), string(r2))
- if d != "" {
- t.Fatal("receipts differ:", d)
- }
-}
-
-func checkBedrockReceipts(t *testing.T, receipts, derivedReceipts []*Receipt) {
- diffReceipts(t, receipts, derivedReceipts)
-
- // Check that we preserved the invariant: l1Fee = l1GasPrice * l1GasUsed * l1FeeScalar
- // but with more difficult int math...
- l2Rcpt := derivedReceipts[1]
- l1GasCost := new(big.Int).Mul(l2Rcpt.L1GasPrice, l2Rcpt.L1GasUsed)
- l1Fee := new(big.Float).Mul(new(big.Float).SetInt(l1GasCost), l2Rcpt.FeeScalar)
- require.Equal(t, new(big.Float).SetInt(l2Rcpt.L1Fee), l1Fee)
-}
-
-func TestBedrockDepositReceiptUnchanged(t *testing.T) {
- expectedRlp := common.FromHex("7EF90156A003000000000000000000000000000000000000000000000000000000000000000AB9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000F0D7940000000000000000000000000000000000000033C001D7940000000000000000000000000000000000000333C002")
- // Deposit receipt with no nonce
- receipt := &Receipt{
- Type: DepositTxType,
- PostState: common.Hash{3}.Bytes(),
- CumulativeGasUsed: 10,
- Logs: []*Log{
- {Address: common.BytesToAddress([]byte{0x33}), Data: []byte{1}, Topics: []common.Hash{}},
- {Address: common.BytesToAddress([]byte{0x03, 0x33}), Data: []byte{2}, Topics: []common.Hash{}},
- },
- TxHash: common.Hash{},
- ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}),
- GasUsed: 4,
- }
-
- rlp, err := receipt.MarshalBinary()
- require.NoError(t, err)
- require.Equal(t, expectedRlp, rlp)
-
- // ConsensusCoord values should be unchanged after reparsing
- parsed := new(Receipt)
- err = parsed.UnmarshalBinary(rlp)
- require.NoError(t, err)
- require.Equal(t, receipt.Status, parsed.Status)
- require.Equal(t, receipt.CumulativeGasUsed, parsed.CumulativeGasUsed)
- require.Equal(t, receipt.Bloom, parsed.Bloom)
- require.EqualValues(t, receipt.Logs, parsed.Logs)
- // And still shouldn't have a nonce
- require.Nil(t, parsed.DepositNonce)
- // ..or a deposit nonce
- require.Nil(t, parsed.DepositReceiptVersion)
-}
-
-// Regolith introduced an inconsistency in behavior between EncodeIndex and MarshalBinary for a
-// deposit transaction receipt. TestReceiptEncodeIndexBugIsEnshrined makes sure this difference is
-// preserved for backwards compatibility purposes, but also that there is no discrepancy for the
-// post-Canyon encoding.
-func TestReceiptEncodeIndexBugIsEnshrined(t *testing.T) {
- // Check that a post-Regolith, pre-Canyon receipt produces the expected difference between
- // EncodeIndex and MarshalBinary.
- buf := new(bytes.Buffer)
- receipts := Receipts{depositReceiptWithNonce}
- receipts.EncodeIndex(0, buf)
- indexBytes := buf.Bytes()
-
- regularBytes, _ := receipts[0].MarshalBinary()
-
- require.NotEqual(t, indexBytes, regularBytes)
-
- // Confirm the buggy encoding is as expected, which means it should encode as if it had no
- // nonce specified (like that of a non-deposit receipt, whose encoding would differ only in the
- // type byte).
- buf.Reset()
- tempReceipt := *depositReceiptWithNonce
- tempReceipt.Type = eip1559Receipt.Type
- buggyBytes, _ := tempReceipt.MarshalBinary()
-
- require.Equal(t, indexBytes[1:], buggyBytes[1:])
-
- // check that the post-Canyon encoding has no differences between EncodeIndex and
- // MarshalBinary.
- buf.Reset()
- receipts = Receipts{depositReceiptWithNonceAndVersion}
- receipts.EncodeIndex(0, buf)
- indexBytes = buf.Bytes()
-
- regularBytes, _ = receipts[0].MarshalBinary()
-
- require.Equal(t, indexBytes, regularBytes)
-
- // Check that bumping the nonce post-canyon changes the hash
- bumpedReceipt := *depositReceiptWithNonceAndVersion
- bumpedNonce := nonce + 1
- bumpedReceipt.DepositNonce = &bumpedNonce
- bumpedBytes, _ := bumpedReceipt.MarshalBinary()
- require.NotEqual(t, regularBytes, bumpedBytes)
-}
-
-func TestRoundTripReceipt(t *testing.T) {
- tests := []struct {
- name string
- rcpt *Receipt
- }{
- {name: "Legacy", rcpt: legacyReceipt},
- {name: "AccessList", rcpt: accessListReceipt},
- {name: "EIP1559", rcpt: eip1559Receipt},
- {name: "DepositNoNonce", rcpt: depositReceiptNoNonce},
- {name: "DepositWithNonce", rcpt: depositReceiptWithNonce},
- {name: "DepositWithNonceAndVersion", rcpt: depositReceiptWithNonceAndVersion},
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- data, err := test.rcpt.MarshalBinary()
- require.NoError(t, err)
-
- d := &Receipt{}
- err = d.UnmarshalBinary(data)
- require.NoError(t, err)
- require.Equal(t, test.rcpt, d)
- require.Equal(t, test.rcpt.DepositNonce, d.DepositNonce)
- require.Equal(t, test.rcpt.DepositReceiptVersion, d.DepositReceiptVersion)
- })
-
- t.Run(fmt.Sprintf("%sRejectExtraData", test.name), func(t *testing.T) {
- data, err := test.rcpt.MarshalBinary()
- require.NoError(t, err)
- data = append(data, 1, 2, 3, 4)
- d := &Receipt{}
- err = d.UnmarshalBinary(data)
- require.Error(t, err)
- })
- }
-}
-
-func TestRoundTripReceiptForStorage(t *testing.T) {
- tests := []struct {
- name string
- rcpt *Receipt
- }{
- {name: "Legacy", rcpt: legacyReceipt},
- {name: "AccessList", rcpt: accessListReceipt},
- {name: "EIP1559", rcpt: eip1559Receipt},
- {name: "DepositNoNonce", rcpt: depositReceiptNoNonce},
- {name: "DepositWithNonce", rcpt: depositReceiptWithNonce},
- {name: "DepositWithNonceAndVersion", rcpt: depositReceiptWithNonceAndVersion},
- }
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- data, err := rlp.EncodeToBytes((*ReceiptForStorage)(test.rcpt))
- require.NoError(t, err)
-
- d := &ReceiptForStorage{}
- err = rlp.DecodeBytes(data, d)
- require.NoError(t, err)
- // Only check the stored fields - the others are derived later
- require.Equal(t, test.rcpt.Status, d.Status)
- require.Equal(t, test.rcpt.CumulativeGasUsed, d.CumulativeGasUsed)
- require.Equal(t, test.rcpt.Logs, d.Logs)
- require.Equal(t, test.rcpt.DepositNonce, d.DepositNonce)
- require.Equal(t, test.rcpt.DepositReceiptVersion, d.DepositReceiptVersion)
- })
- }
-}
diff --git a/core/types/rollup_cost.go b/core/types/rollup_cost.go
index 529e847ccf..ffbdd2098f 100644
--- a/core/types/rollup_cost.go
+++ b/core/types/rollup_cost.go
@@ -19,6 +19,7 @@ package types
import (
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"math/big"
@@ -41,6 +42,9 @@ const (
// array. baseFeeScalar is in the first four bytes of the segment, blobBaseFeeScalar the next
// four.
scalarSectionStart = 32 - BaseFeeScalarSlotOffset - 4
+
+ IsthmusL1AttributesLen = 176
+ JovianL1AttributesLen = 178
)
func init() {
@@ -57,6 +61,8 @@ var (
EcotoneL1AttributesSelector = []byte{0x44, 0x0a, 0x5e, 0x20}
// IsthmusL1AttributesSelector is the selector indicating Isthmus style L1 gas attributes.
IsthmusL1AttributesSelector = []byte{0x09, 0x89, 0x99, 0xbe}
+ // JovianL1AttributesSelector is the selector indicating Jovian style L1 gas attributes.
+ JovianL1AttributesSelector = []byte{0x3d, 0xb6, 0xbe, 0x2b}
// L1BlockAddr is the address of the L1Block contract which stores the L1 gas attributes.
L1BlockAddr = common.HexToAddress("0x4200000000000000000000000000000000000015")
@@ -77,6 +83,7 @@ var (
// attributes
OperatorFeeParamsSlot = common.BigToHash(big.NewInt(8))
+ oneHundred = big.NewInt(100)
oneMillion = big.NewInt(1_000_000)
ecotoneDivisor = big.NewInt(1_000_000 * 16)
fjordDivisor = big.NewInt(1_000_000_000_000)
@@ -226,7 +233,11 @@ func NewOperatorCostFunc(config *params.ChainConfig, statedb StateGetter) Operat
}
operatorFeeScalar, operatorFeeConstant := ExtractOperatorFeeParams(operatorFeeParams)
- return newOperatorCostFunc(operatorFeeScalar, operatorFeeConstant)
+ // Return the Operator Fee fix version if the feature is active
+ if config.IsOperatorFeeFix(blockTime) {
+ return newOperatorCostFuncOperatorFeeFix(operatorFeeScalar, operatorFeeConstant)
+ }
+ return newOperatorCostFuncIsthmus(operatorFeeScalar, operatorFeeConstant)
}
return func(gas uint64, blockTime uint64) *uint256.Int {
@@ -239,7 +250,8 @@ func NewOperatorCostFunc(config *params.ChainConfig, statedb StateGetter) Operat
}
}
-func newOperatorCostFunc(operatorFeeScalar *big.Int, operatorFeeConstant *big.Int) operatorCostFunc {
+// newOperatorCostFuncIsthmus returns the operator cost function introduced with Isthmus.
+func newOperatorCostFuncIsthmus(operatorFeeScalar *big.Int, operatorFeeConstant *big.Int) operatorCostFunc {
return func(gas uint64) *uint256.Int {
fee := new(big.Int).SetUint64(gas)
fee = fee.Mul(fee, operatorFeeScalar)
@@ -248,7 +260,25 @@ func newOperatorCostFunc(operatorFeeScalar *big.Int, operatorFeeConstant *big.In
feeU256, overflow := uint256.FromBig(fee)
if overflow {
- // This should never happen, as (u64.max * u32.max / 1e6) + u64.max is an int of bit length 77
+ // This should never happen, as ((u64.max * u32.max) / 1e6) + u64.max fits in 77 bits
+ panic("overflow in operator cost calculation")
+ }
+
+ return feeU256
+ }
+}
+
+// newOperatorCostFuncOperatorFeeFix returns the operator cost function for the operator fee fix feature.
+func newOperatorCostFuncOperatorFeeFix(operatorFeeScalar *big.Int, operatorFeeConstant *big.Int) operatorCostFunc {
+ return func(gas uint64) *uint256.Int {
+ fee := new(big.Int).SetUint64(gas)
+ fee = fee.Mul(fee, operatorFeeScalar)
+ fee = fee.Mul(fee, oneHundred)
+ fee = fee.Add(fee, operatorFeeConstant)
+
+ feeU256, overflow := uint256.FromBig(fee)
+ if overflow {
+ // This should never happen, as (u64.max * u32.max * 100) + u64.max fits in 103 bits
panic("overflow in operator cost calculation")
}
@@ -512,6 +542,54 @@ func extractL1GasParamsPostIsthmus(data []byte) (gasParams, error) {
}, nil
}
+// ExtractDAFootprintGasScalar extracts the DA footprint gas scalar from the L1 attributes transaction data
+// of a Jovian-enabled block.
+func ExtractDAFootprintGasScalar(data []byte) (uint16, error) {
+ if len(data) < JovianL1AttributesLen {
+ return 0, fmt.Errorf("L1 attributes transaction data too short for DA footprint gas scalar: %d", len(data))
+ }
+ // Future forks need to be added here
+ if !bytes.Equal(data[0:4], JovianL1AttributesSelector) {
+ return 0, fmt.Errorf("L1 attributes transaction data does not have Jovian selector")
+ }
+ daFootprintGasScalar := binary.BigEndian.Uint16(data[JovianL1AttributesLen-2 : JovianL1AttributesLen])
+ return daFootprintGasScalar, nil
+}
+
+// CalcDAFootprint calculates the total DA footprint of a block for an OP Stack chain.
+// Jovian introduces a DA footprint block limit which is stored in the BlobGasUsed header field and that is taken
+// into account during base fee updates.
+// CalcDAFootprint must not be called for pre-Jovian blocks.
+func CalcDAFootprint(txs []*Transaction) (uint64, error) {
+ if len(txs) == 0 || !txs[0].IsDepositTx() {
+ return 0, errors.New("missing deposit transaction")
+ }
+
+ // First Jovian block doesn't set the DA footprint gas scalar yet and
+ // it must not have user transactions.
+ data := txs[0].Data()
+ if len(data) == IsthmusL1AttributesLen {
+ if !txs[len(txs)-1].IsDepositTx() {
+ // sufficient to check last transaction because deposits precede non-deposit txs
+ return 0, errors.New("unexpected non-deposit transactions in Jovian activation block")
+ }
+ return 0, nil
+ } // ExtractDAFootprintGasScalar catches all invalid lengths
+
+ daFootprintGasScalar, err := ExtractDAFootprintGasScalar(data)
+ if err != nil {
+ return 0, err
+ }
+ var daFootprint uint64
+ for _, tx := range txs {
+ if tx.IsDepositTx() {
+ continue
+ }
+ daFootprint += tx.RollupCostData().EstimatedDASize().Uint64() * uint64(daFootprintGasScalar)
+ }
+ return daFootprint, nil
+}
+
// L1Cost computes the the data availability fee for transactions in blocks prior to the Ecotone
// upgrade. It is used by e2e tests so must remain exported.
func L1Cost(rollupDataGas uint64, l1BaseFee, overhead, scalar *big.Int) *big.Int {
@@ -572,13 +650,13 @@ func ExtractEcotoneFeeParams(l1FeeParams []byte) (l1BaseFeeScalar, l1BlobBaseFee
offset := scalarSectionStart
l1BaseFeeScalar = new(big.Int).SetBytes(l1FeeParams[offset : offset+4])
l1BlobBaseFeeScalar = new(big.Int).SetBytes(l1FeeParams[offset+4 : offset+8])
- return
+ return l1BaseFeeScalar, l1BlobBaseFeeScalar
}
func ExtractOperatorFeeParams(operatorFeeParams common.Hash) (operatorFeeScalar, operatorFeeConstant *big.Int) {
operatorFeeScalar = new(big.Int).SetBytes(operatorFeeParams[20:24])
operatorFeeConstant = new(big.Int).SetBytes(operatorFeeParams[24:32])
- return
+ return operatorFeeScalar, operatorFeeConstant
}
func bedrockCalldataGasUsed(costData RollupCostData) (calldataGasUsed *big.Int) {
diff --git a/core/types/rollup_cost_test.go b/core/types/rollup_cost_test.go
index b595a14f62..c07b53f023 100644
--- a/core/types/rollup_cost_test.go
+++ b/core/types/rollup_cost_test.go
@@ -33,6 +33,7 @@ var (
// the emptyTx is out of bounds for the linear regression so it uses the minimum size
fjordFee = big.NewInt(3203000) // 100_000_000 * (2 * 1000 * 1e6 * 16 + 3 * 10 * 1e6) / 1e12
ithmusOperatorFee = uint256.NewInt(1256417826611659930) // 1618 * 1439103868 / 1e6 + 1256417826609331460
+ jovianOperatorFee = uint256.NewInt(1256650673615173860) // 1618 * 1439103868 * 100 + 1256417826609331460
bedrockGas = big.NewInt(1618)
regolithGas = big.NewInt(530) // 530 = 1618 - (16*68)
@@ -461,6 +462,13 @@ func TestNewOperatorCostFunc(t *testing.T) {
fee = costFunc(bedrockGas.Uint64(), time)
require.NotNil(t, fee)
require.Equal(t, ithmusOperatorFee, fee)
+
+ // emptyTx fee w/ jovian config should be not 0
+ config.JovianTime = &time
+ costFunc = NewOperatorCostFunc(config, statedb)
+ fee = costFunc(bedrockGas.Uint64(), time)
+ require.NotNil(t, fee)
+ require.Equal(t, jovianOperatorFee, fee)
}
func TestFlzCompressLen(t *testing.T) {
@@ -516,14 +524,16 @@ var emptyTxWithGas = NewTransaction(
// combines the L1 cost and operator cost.
func TestTotalRollupCostFunc(t *testing.T) {
zero := uint64(0)
- later := uint64(10)
+ isthmusTime := uint64(10)
+ jovianTime := uint64(20)
config := ¶ms.ChainConfig{
Optimism: params.OptimismTestConfig.Optimism,
RegolithTime: &zero,
EcotoneTime: &zero,
FjordTime: &zero,
HoloceneTime: &zero,
- IsthmusTime: &later,
+ IsthmusTime: &isthmusTime,
+ JovianTime: &jovianTime,
}
statedb := &testStateGetter{
baseFee: baseFee,
@@ -537,13 +547,24 @@ func TestTotalRollupCostFunc(t *testing.T) {
}
costFunc := NewTotalRollupCostFunc(config, statedb)
- cost := costFunc(emptyTxWithGas, later-1)
+
+ // Pre-Isthmus: only L1 cost
+ cost := costFunc(emptyTxWithGas, isthmusTime-1)
require.NotNil(t, cost)
expCost := uint256.MustFromBig(fjordFee)
require.Equal(t, expCost, cost, "pre-Isthmus total rollup cost should only contain L1 cost")
- cost = costFunc(emptyTxWithGas, later+1)
+ // Isthmus: L1 cost + Isthmus operator cost
+ cost = costFunc(emptyTxWithGas, isthmusTime+1)
require.NotNil(t, cost)
+ expCost = uint256.MustFromBig(fjordFee)
expCost.Add(expCost, ithmusOperatorFee)
- require.Equal(t, expCost, cost, "Isthmus total rollup cost should contain L1 cost and operator cost")
+ require.Equal(t, expCost, cost, "Isthmus total rollup cost should contain L1 cost and Isthmus operator cost")
+
+ // Jovian: L1 cost + fixed operator cost
+ cost = costFunc(emptyTxWithGas, jovianTime+1)
+ require.NotNil(t, cost)
+ expCost = uint256.MustFromBig(fjordFee)
+ expCost.Add(expCost, jovianOperatorFee)
+ require.Equal(t, expCost, cost, "Jovian total rollup cost should contain L1 cost and Jovian operator cost")
}
diff --git a/core/types/transaction.go b/core/types/transaction.go
index fe95aa9d66..90c6a46850 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/uint256"
)
var (
@@ -37,6 +38,7 @@ var (
ErrInvalidTxType = errors.New("transaction type not valid in this context")
ErrTxTypeNotSupported = errors.New("transaction type not supported")
ErrGasFeeCapTooLow = errors.New("fee cap less than base fee")
+ ErrUint256Overflow = errors.New("bigint overflow, too large for uint256")
errShortTypedTx = errors.New("typed transaction too short")
errInvalidYParity = errors.New("'yParity' field must be 0 or 1")
errVYParityMismatch = errors.New("'v' and 'yParity' fields do not match")
@@ -452,59 +454,71 @@ func (tx *Transaction) GasTipCapIntCmp(other *big.Int) int {
}
// EffectiveGasTip returns the effective miner gasTipCap for the given base fee.
-// Note: if the effective gasTipCap is negative, this method returns both error
-// the actual negative value, _and_ ErrGasFeeCapTooLow
+// Note: if the effective gasTipCap would be negative, this method
+// returns ErrGasFeeCapTooLow, and value is undefined.
func (tx *Transaction) EffectiveGasTip(baseFee *big.Int) (*big.Int, error) {
- dst := new(big.Int)
- err := tx.calcEffectiveGasTip(dst, baseFee)
- return dst, err
+ dst := new(uint256.Int)
+ base := new(uint256.Int)
+ if baseFee != nil {
+ if base.SetFromBig(baseFee) {
+ return nil, ErrUint256Overflow
+ }
+ }
+ err := tx.calcEffectiveGasTip(dst, base)
+ return dst.ToBig(), err
}
// calcEffectiveGasTip calculates the effective gas tip of the transaction and
// saves the result to dst.
-func (tx *Transaction) calcEffectiveGasTip(dst *big.Int, baseFee *big.Int) error {
+func (tx *Transaction) calcEffectiveGasTip(dst *uint256.Int, baseFee *uint256.Int) error {
if tx.Type() == DepositTxType {
- dst.Set(common.Big0)
+ dst.Set(uint256.NewInt(0))
return nil
}
if baseFee == nil {
- dst.Set(tx.inner.gasTipCap())
+ if dst.SetFromBig(tx.inner.gasTipCap()) {
+ return ErrUint256Overflow
+ }
return nil
}
var err error
- gasFeeCap := tx.inner.gasFeeCap()
- if gasFeeCap.Cmp(baseFee) < 0 {
+ if dst.SetFromBig(tx.inner.gasFeeCap()) {
+ return ErrUint256Overflow
+ }
+ if dst.Cmp(baseFee) < 0 {
err = ErrGasFeeCapTooLow
}
- dst.Sub(gasFeeCap, baseFee)
- gasTipCap := tx.inner.gasTipCap()
+ dst.Sub(dst, baseFee)
+ gasTipCap := new(uint256.Int)
+ if gasTipCap.SetFromBig(tx.inner.gasTipCap()) {
+ return ErrUint256Overflow
+ }
if gasTipCap.Cmp(dst) < 0 {
dst.Set(gasTipCap)
}
return err
}
-// EffectiveGasTipCmp compares the effective gasTipCap of two transactions assuming the given base fee.
-func (tx *Transaction) EffectiveGasTipCmp(other *Transaction, baseFee *big.Int) int {
+func (tx *Transaction) EffectiveGasTipCmp(other *Transaction, baseFee *uint256.Int) int {
if baseFee == nil {
return tx.GasTipCapCmp(other)
}
// Use more efficient internal method.
- txTip, otherTip := new(big.Int), new(big.Int)
+ txTip, otherTip := new(uint256.Int), new(uint256.Int)
tx.calcEffectiveGasTip(txTip, baseFee)
other.calcEffectiveGasTip(otherTip, baseFee)
return txTip.Cmp(otherTip)
}
// EffectiveGasTipIntCmp compares the effective gasTipCap of a transaction to the given gasTipCap.
-func (tx *Transaction) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) int {
+func (tx *Transaction) EffectiveGasTipIntCmp(other *uint256.Int, baseFee *uint256.Int) int {
if baseFee == nil {
- return tx.GasTipCapIntCmp(other)
+ return tx.GasTipCapIntCmp(other.ToBig())
}
- txTip := new(big.Int)
+ txTip := new(uint256.Int)
tx.calcEffectiveGasTip(txTip, baseFee)
return txTip.Cmp(other)
}
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index 7d5e2f058a..cc41674dfd 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/uint256"
)
// The values in those tests are from the Transaction Tests
@@ -609,12 +610,12 @@ func BenchmarkEffectiveGasTip(b *testing.B) {
Data: nil,
}
tx, _ := SignNewTx(key, signer, txdata)
- baseFee := big.NewInt(1000000000) // 1 gwei
+ baseFee := uint256.NewInt(1000000000) // 1 gwei
b.Run("Original", func(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- _, err := tx.EffectiveGasTip(baseFee)
+ _, err := tx.EffectiveGasTip(baseFee.ToBig())
if err != nil {
b.Fatal(err)
}
@@ -623,7 +624,7 @@ func BenchmarkEffectiveGasTip(b *testing.B) {
b.Run("IntoMethod", func(b *testing.B) {
b.ReportAllocs()
- dst := new(big.Int)
+ dst := new(uint256.Int)
for i := 0; i < b.N; i++ {
err := tx.calcEffectiveGasTip(dst, baseFee)
if err != nil {
@@ -634,9 +635,6 @@ func BenchmarkEffectiveGasTip(b *testing.B) {
}
func TestEffectiveGasTipInto(t *testing.T) {
- signer := LatestSigner(params.TestChainConfig)
- key, _ := crypto.GenerateKey()
-
testCases := []struct {
tipCap int64
feeCap int64
@@ -652,8 +650,26 @@ func TestEffectiveGasTipInto(t *testing.T) {
{tipCap: 50, feeCap: 100, baseFee: nil}, // nil base fee
}
+ // original, non-allocation golfed version
+ orig := func(tx *Transaction, baseFee *big.Int) (*big.Int, error) {
+ if baseFee == nil {
+ return tx.GasTipCap(), nil
+ }
+ var err error
+ gasFeeCap := tx.GasFeeCap()
+ if gasFeeCap.Cmp(baseFee) < 0 {
+ err = ErrGasFeeCapTooLow
+ }
+ gasFeeCap = gasFeeCap.Sub(gasFeeCap, baseFee)
+ gasTipCap := tx.GasTipCap()
+ if gasTipCap.Cmp(gasFeeCap) < 0 {
+ return gasTipCap, err
+ }
+ return gasFeeCap, err
+ }
+
for i, tc := range testCases {
- txdata := &DynamicFeeTx{
+ tx := NewTx(&DynamicFeeTx{
ChainID: big.NewInt(1),
Nonce: 0,
GasTipCap: big.NewInt(tc.tipCap),
@@ -662,27 +678,28 @@ func TestEffectiveGasTipInto(t *testing.T) {
To: &common.Address{},
Value: big.NewInt(0),
Data: nil,
- }
- tx, _ := SignNewTx(key, signer, txdata)
+ })
var baseFee *big.Int
+ var baseFee2 *uint256.Int
if tc.baseFee != nil {
baseFee = big.NewInt(*tc.baseFee)
+ baseFee2 = uint256.NewInt(uint64(*tc.baseFee))
}
// Get result from original method
- orig, origErr := tx.EffectiveGasTip(baseFee)
+ orig, origErr := orig(tx, baseFee)
// Get result from new method
- dst := new(big.Int)
- newErr := tx.calcEffectiveGasTip(dst, baseFee)
+ dst := new(uint256.Int)
+ newErr := tx.calcEffectiveGasTip(dst, baseFee2)
// Compare results
if (origErr != nil) != (newErr != nil) {
t.Fatalf("case %d: error mismatch: orig %v, new %v", i, origErr, newErr)
}
- if orig.Cmp(dst) != 0 {
+ if origErr == nil && orig.Cmp(dst.ToBig()) != 0 {
t.Fatalf("case %d: result mismatch: orig %v, new %v", i, orig, dst)
}
}
@@ -692,3 +709,28 @@ func TestEffectiveGasTipInto(t *testing.T) {
func intPtr(i int64) *int64 {
return &i
}
+
+func BenchmarkEffectiveGasTipCmp(b *testing.B) {
+ signer := LatestSigner(params.TestChainConfig)
+ key, _ := crypto.GenerateKey()
+ txdata := &DynamicFeeTx{
+ ChainID: big.NewInt(1),
+ Nonce: 0,
+ GasTipCap: big.NewInt(2000000000),
+ GasFeeCap: big.NewInt(3000000000),
+ Gas: 21000,
+ To: &common.Address{},
+ Value: big.NewInt(0),
+ Data: nil,
+ }
+ tx, _ := SignNewTx(key, signer, txdata)
+ other, _ := SignNewTx(key, signer, txdata)
+ baseFee := uint256.NewInt(1000000000) // 1 gwei
+
+ b.Run("Original", func(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ tx.EffectiveGasTipCmp(other, baseFee)
+ }
+ })
+}
diff --git a/core/types/withdrawal.go b/core/types/withdrawal.go
index 6f99e53b56..2cf00195a0 100644
--- a/core/types/withdrawal.go
+++ b/core/types/withdrawal.go
@@ -49,7 +49,7 @@ type Withdrawals []*Withdrawal
// Len returns the length of s.
func (s Withdrawals) Len() int { return len(s) }
-var withdrawalSize = int(reflect.TypeOf(Withdrawal{}).Size())
+var withdrawalSize = int(reflect.TypeFor[Withdrawal]().Size())
func (s Withdrawals) Size() int {
return withdrawalSize * len(s)
diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go
index 428aafc83c..584e790433 100644
--- a/core/verkle_witness_test.go
+++ b/core/verkle_witness_test.go
@@ -20,6 +20,7 @@ import (
"bytes"
"encoding/binary"
"encoding/hex"
+ "fmt"
"math/big"
"slices"
"testing"
@@ -202,12 +203,15 @@ func TestProcessVerkle(t *testing.T) {
t.Log("verified verkle proof, inserting blocks into the chain")
+ for i, b := range chain {
+ fmt.Printf("%d %x\n", i, b.Root())
+ }
endnum, err := blockchain.InsertChain(chain)
if err != nil {
t.Fatalf("block %d imported with error: %v", endnum, err)
}
- for i := 0; i < 2; i++ {
+ for i := range 2 {
b := blockchain.GetBlockByNumber(uint64(i) + 1)
if b == nil {
t.Fatalf("expected block %d to be present in chain", i+1)
@@ -783,7 +787,7 @@ func TestProcessVerkleSelfDestructInSeparateTx(t *testing.T) {
}
}
-// TestProcessVerkleSelfDestructInSeparateTx controls the contents of the witness after
+// TestProcessVerkleSelfDestructInSameTx controls the contents of the witness after
// a eip6780-compliant selfdestruct occurs.
func TestProcessVerkleSelfDestructInSameTx(t *testing.T) {
// The test txs were taken from a secondary testnet with chain id 69421
diff --git a/core/vm/common.go b/core/vm/common.go
index 658803b820..2990f58972 100644
--- a/core/vm/common.go
+++ b/core/vm/common.go
@@ -84,12 +84,3 @@ func toWordSize(size uint64) uint64 {
return (size + 31) / 32
}
-
-func allZero(b []byte) bool {
- for _, byte := range b {
- if byte != 0 {
- return false
- }
- }
- return true
-}
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 0ba87ef05f..2a2e4ca669 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -30,6 +30,7 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls12-381/fp"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/blake2b"
@@ -46,6 +47,7 @@ import (
type PrecompiledContract interface {
RequiredGas(input []byte) uint64 // RequiredPrice calculates the contract gas use
Run(input []byte) ([]byte, error) // Run runs the precompiled contract
+ Name() string
}
// PrecompiledContracts contains the precompiled contracts supported at the given fork.
@@ -225,7 +227,29 @@ var PrecompiledContractsIsthmus = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{0x01, 0x00}): &p256VerifyFjord{},
}
+var PrecompiledContractsJovian = map[common.Address]PrecompiledContract{
+ common.BytesToAddress([]byte{1}): &ecrecover{},
+ common.BytesToAddress([]byte{2}): &sha256hash{},
+ common.BytesToAddress([]byte{3}): &ripemd160hash{},
+ common.BytesToAddress([]byte{4}): &dataCopy{},
+ common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true},
+ common.BytesToAddress([]byte{6}): &bn256AddIstanbul{},
+ common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{},
+ common.BytesToAddress([]byte{8}): &bn256PairingJovian{},
+ common.BytesToAddress([]byte{9}): &blake2F{},
+ common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{},
+ common.BytesToAddress([]byte{0x0b}): &bls12381G1Add{},
+ common.BytesToAddress([]byte{0x0c}): &bls12381G1MultiExpJovian{},
+ common.BytesToAddress([]byte{0x0d}): &bls12381G2Add{},
+ common.BytesToAddress([]byte{0x0e}): &bls12381G2MultiExpJovian{},
+ common.BytesToAddress([]byte{0x0f}): &bls12381PairingJovian{},
+ common.BytesToAddress([]byte{0x10}): &bls12381MapG1{},
+ common.BytesToAddress([]byte{0x11}): &bls12381MapG2{},
+ common.BytesToAddress([]byte{0x01, 0x00}): &p256VerifyFjord{},
+}
+
var (
+ PrecompiledAddressesJovian []common.Address
PrecompiledAddressesIsthmus []common.Address
PrecompiledAddressesGranite []common.Address
PrecompiledAddressesFjord []common.Address
@@ -269,11 +293,16 @@ func init() {
for k := range PrecompiledContractsIsthmus {
PrecompiledAddressesIsthmus = append(PrecompiledAddressesIsthmus, k)
}
+ for k := range PrecompiledContractsJovian {
+ PrecompiledAddressesJovian = append(PrecompiledAddressesJovian, k)
+ }
}
func activePrecompiledContracts(rules params.Rules) PrecompiledContracts {
// note: the order of these switch cases is important
switch {
+ case rules.IsOptimismJovian:
+ return PrecompiledContractsJovian
case rules.IsOptimismIsthmus:
return PrecompiledContractsIsthmus
case rules.IsOptimismGranite:
@@ -307,6 +336,8 @@ func ActivePrecompiledContracts(rules params.Rules) PrecompiledContracts {
// ActivePrecompiles returns the precompile addresses enabled with the current configuration.
func ActivePrecompiles(rules params.Rules) []common.Address {
switch {
+ case rules.IsOptimismJovian:
+ return PrecompiledAddressesJovian
case rules.IsOptimismIsthmus:
return PrecompiledAddressesIsthmus
case rules.IsOptimismGranite:
@@ -367,7 +398,7 @@ func (c *ecrecover) Run(input []byte) ([]byte, error) {
v := input[63] - 27
// tighter sig s values input homestead only apply to tx sigs
- if !allZero(input[32:63]) || !crypto.ValidateSignatureValues(v, r, s, false) {
+ if bitutil.TestBytes(input[32:63]) || !crypto.ValidateSignatureValues(v, r, s, false) {
return nil, nil
}
// We must make sure not to modify the 'input', so placing the 'v' along with
@@ -386,6 +417,10 @@ func (c *ecrecover) Run(input []byte) ([]byte, error) {
return common.LeftPadBytes(crypto.Keccak256(pubKey[1:])[12:], 32), nil
}
+func (c *ecrecover) Name() string {
+ return "ECREC"
+}
+
// SHA256 implemented as a native contract.
type sha256hash struct{}
@@ -401,6 +436,10 @@ func (c *sha256hash) Run(input []byte) ([]byte, error) {
return h[:], nil
}
+func (c *sha256hash) Name() string {
+ return "SHA256"
+}
+
// RIPEMD160 implemented as a native contract.
type ripemd160hash struct{}
@@ -417,6 +456,10 @@ func (c *ripemd160hash) Run(input []byte) ([]byte, error) {
return common.LeftPadBytes(ripemd.Sum(nil), 32), nil
}
+func (c *ripemd160hash) Name() string {
+ return "RIPEMD160"
+}
+
// data copy implemented as a native contract.
type dataCopy struct{}
@@ -431,6 +474,10 @@ func (c *dataCopy) Run(in []byte) ([]byte, error) {
return common.CopyBytes(in), nil
}
+func (c *dataCopy) Name() string {
+ return "ID"
+}
+
// bigModExp implements a native big integer exponential modular operation.
type bigModExp struct {
eip2565 bool
@@ -578,15 +625,24 @@ func (c *bigModExp) RequiredGas(input []byte) uint64 {
func (c *bigModExp) Run(input []byte) ([]byte, error) {
var (
- baseLen = new(big.Int).SetBytes(getData(input, 0, 32)).Uint64()
- expLen = new(big.Int).SetBytes(getData(input, 32, 32)).Uint64()
- modLen = new(big.Int).SetBytes(getData(input, 64, 32)).Uint64()
+ baseLenBig = new(big.Int).SetBytes(getData(input, 0, 32))
+ expLenBig = new(big.Int).SetBytes(getData(input, 32, 32))
+ modLenBig = new(big.Int).SetBytes(getData(input, 64, 32))
+ baseLen = baseLenBig.Uint64()
+ expLen = expLenBig.Uint64()
+ modLen = modLenBig.Uint64()
+ inputLenOverflow = max(baseLenBig.BitLen(), expLenBig.BitLen(), modLenBig.BitLen()) > 64
)
if len(input) > 96 {
input = input[96:]
} else {
input = input[:0]
}
+
+ // enforce size cap for inputs
+ if c.eip7823 && (inputLenOverflow || max(baseLen, expLen, modLen) > 1024) {
+ return nil, errors.New("one or more of base/exponent/modulus length exceeded 1024 bytes")
+ }
// Handle a special case when both the base and mod length is zero
if baseLen == 0 && modLen == 0 {
return []byte{}, nil
@@ -615,6 +671,10 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) {
return common.LeftPadBytes(v, int(modLen)), nil
}
+func (c *bigModExp) Name() string {
+ return "MODEXP"
+}
+
// newCurvePoint unmarshals a binary blob into a bn256 elliptic curve point,
// returning it, or an error if the point is invalid.
func newCurvePoint(blob []byte) (*bn256.G1, error) {
@@ -664,6 +724,10 @@ func (c *bn256AddIstanbul) Run(input []byte) ([]byte, error) {
return runBn256Add(input)
}
+func (c *bn256AddIstanbul) Name() string {
+ return "BN254_ADD"
+}
+
// bn256AddByzantium implements a native elliptic curve point addition
// conforming to Byzantium consensus rules.
type bn256AddByzantium struct{}
@@ -677,6 +741,10 @@ func (c *bn256AddByzantium) Run(input []byte) ([]byte, error) {
return runBn256Add(input)
}
+func (c *bn256AddByzantium) Name() string {
+ return "BN254_ADD"
+}
+
// runBn256ScalarMul implements the Bn256ScalarMul precompile, referenced by
// both Byzantium and Istanbul operations.
func runBn256ScalarMul(input []byte) ([]byte, error) {
@@ -702,6 +770,10 @@ func (c *bn256ScalarMulIstanbul) Run(input []byte) ([]byte, error) {
return runBn256ScalarMul(input)
}
+func (c *bn256ScalarMulIstanbul) Name() string {
+ return "BN254_MUL"
+}
+
// bn256ScalarMulByzantium implements a native elliptic curve scalar
// multiplication conforming to Byzantium consensus rules.
type bn256ScalarMulByzantium struct{}
@@ -715,6 +787,10 @@ func (c *bn256ScalarMulByzantium) Run(input []byte) ([]byte, error) {
return runBn256ScalarMul(input)
}
+func (c *bn256ScalarMulByzantium) Name() string {
+ return "BN254_MUL"
+}
+
var (
// true32Byte is returned if the bn256 pairing check succeeds.
true32Byte = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
@@ -776,6 +852,27 @@ func (c *bn256PairingGranite) Run(input []byte) ([]byte, error) {
return runBn256Pairing(input)
}
+func (c *bn256PairingGranite) Name() string {
+ return "BN254_PAIRING"
+}
+
+type bn256PairingJovian struct{}
+
+func (c *bn256PairingJovian) RequiredGas(input []byte) uint64 {
+ return new(bn256PairingIstanbul).RequiredGas(input)
+}
+
+func (c *bn256PairingJovian) Run(input []byte) ([]byte, error) {
+ if len(input) > int(params.Bn256PairingMaxInputSizeJovian) {
+ return nil, errBadPairingInputSize
+ }
+ return runBn256Pairing(input)
+}
+
+func (c *bn256PairingJovian) Name() string {
+ return "BN254_PAIRING"
+}
+
// bn256PairingIstanbul implements a pairing pre-compile for the bn256 curve
// conforming to Istanbul consensus rules.
type bn256PairingIstanbul struct{}
@@ -789,6 +886,10 @@ func (c *bn256PairingIstanbul) Run(input []byte) ([]byte, error) {
return runBn256Pairing(input)
}
+func (c *bn256PairingIstanbul) Name() string {
+ return "BN254_PAIRING"
+}
+
// bn256PairingByzantium implements a pairing pre-compile for the bn256 curve
// conforming to Byzantium consensus rules.
type bn256PairingByzantium struct{}
@@ -802,6 +903,10 @@ func (c *bn256PairingByzantium) Run(input []byte) ([]byte, error) {
return runBn256Pairing(input)
}
+func (c *bn256PairingByzantium) Name() string {
+ return "BN254_PAIRING"
+}
+
type blake2F struct{}
func (c *blake2F) RequiredGas(input []byte) uint64 {
@@ -863,6 +968,10 @@ func (c *blake2F) Run(input []byte) ([]byte, error) {
return output, nil
}
+func (c *blake2F) Name() string {
+ return "BLAKE2F"
+}
+
var (
errBLS12381InvalidInputLength = errors.New("invalid input length")
errBLS12381InvalidFieldElementTopBytes = errors.New("invalid field element top bytes")
@@ -923,8 +1032,35 @@ func (c *bls12381G1MultiExpIsthmus) Run(input []byte) ([]byte, error) {
return new(bls12381G1MultiExp).Run(input)
}
+func (c *bls12381G1MultiExpIsthmus) Name() string {
+ return "BLS12_G1MSM"
+}
+
+type bls12381G1MultiExpJovian struct {
+}
+
+func (c *bls12381G1MultiExpJovian) RequiredGas(input []byte) uint64 {
+ return new(bls12381G1MultiExp).RequiredGas(input)
+}
+
+func (c *bls12381G1MultiExpJovian) Run(input []byte) ([]byte, error) {
+ if len(input) > int(params.Bls12381G1MulMaxInputSizeJovian) {
+ return nil, errBLS12381MaxG1Size
+ }
+
+ return new(bls12381G1MultiExp).Run(input)
+}
+
+func (c *bls12381G1MultiExpJovian) Name() string {
+ return "BLS12_G1MSM"
+}
// bls12381G1MultiExp implements EIP-2537 G1MultiExp precompile for Prague (no size limits).
+func (c *bls12381G1Add) Name() string {
+ return "BLS12_G1ADD"
+}
+
+// bls12381G1MultiExp implements EIP-2537 G1MultiExp precompile.
type bls12381G1MultiExp struct{}
// RequiredGas returns the gas required to execute the pre-compiled contract.
@@ -984,6 +1120,10 @@ func (c *bls12381G1MultiExp) Run(input []byte) ([]byte, error) {
return encodePointG1(r), nil
}
+func (c *bls12381G1MultiExp) Name() string {
+ return "BLS12_G1MSM"
+}
+
// bls12381G2Add implements EIP-2537 G2Add precompile.
type bls12381G2Add struct{}
@@ -1021,6 +1161,10 @@ func (c *bls12381G2Add) Run(input []byte) ([]byte, error) {
return encodePointG2(r), nil
}
+func (c *bls12381G2Add) Name() string {
+ return "BLS12_G2ADD"
+}
+
type bls12381G2MultiExpIsthmus struct {
}
@@ -1036,6 +1180,29 @@ func (c *bls12381G2MultiExpIsthmus) Run(input []byte) ([]byte, error) {
return new(bls12381G2MultiExp).Run(input)
}
+func (c *bls12381G2MultiExpIsthmus) Name() string {
+ return "BLS12_G2MSM"
+}
+
+type bls12381G2MultiExpJovian struct {
+}
+
+func (c *bls12381G2MultiExpJovian) RequiredGas(input []byte) uint64 {
+ return new(bls12381G2MultiExp).RequiredGas(input)
+}
+
+func (c *bls12381G2MultiExpJovian) Run(input []byte) ([]byte, error) {
+ if len(input) > int(params.Bls12381G2MulMaxInputSizeJovian) {
+ return nil, errBLS12381MaxG2Size
+ }
+
+ return new(bls12381G2MultiExp).Run(input)
+}
+
+func (c *bls12381G2MultiExpJovian) Name() string {
+ return "BLS12_G2MSM"
+}
+
// bls12381G2MultiExp implements EIP-2537 G2MultiExp precompile.
type bls12381G2MultiExp struct{}
@@ -1096,6 +1263,10 @@ func (c *bls12381G2MultiExp) Run(input []byte) ([]byte, error) {
return encodePointG2(r), nil
}
+func (c *bls12381G2MultiExp) Name() string {
+ return "BLS12_G2MSM"
+}
+
type bls12381PairingIsthmus struct {
}
@@ -1111,6 +1282,29 @@ func (c *bls12381PairingIsthmus) Run(input []byte) ([]byte, error) {
return new(bls12381Pairing).Run(input)
}
+func (c *bls12381PairingIsthmus) Name() string {
+ return "BLS12_PAIRING_CHECK"
+}
+
+type bls12381PairingJovian struct {
+}
+
+func (c *bls12381PairingJovian) RequiredGas(input []byte) uint64 {
+ return new(bls12381Pairing).RequiredGas(input)
+}
+
+func (c *bls12381PairingJovian) Run(input []byte) ([]byte, error) {
+ if len(input) > int(params.Bls12381PairingMaxInputSizeJovian) {
+ return nil, errBLS12381MaxPairingSize
+ }
+
+ return new(bls12381Pairing).Run(input)
+}
+
+func (c *bls12381PairingJovian) Name() string {
+ return "BLS12_PAIRING_CHECK"
+}
+
// bls12381Pairing implements EIP-2537 Pairing precompile.
type bls12381Pairing struct{}
@@ -1174,6 +1368,10 @@ func (c *bls12381Pairing) Run(input []byte) ([]byte, error) {
return out, nil
}
+func (c *bls12381Pairing) Name() string {
+ return "BLS12_PAIRING_CHECK"
+}
+
func decodePointG1(in []byte) (*bls12381.G1Affine, error) {
if len(in) != 128 {
return nil, errors.New("invalid g1 point length")
@@ -1292,6 +1490,10 @@ func (c *bls12381MapG1) Run(input []byte) ([]byte, error) {
return encodePointG1(&r), nil
}
+func (c *bls12381MapG1) Name() string {
+ return "BLS12_MAP_FP_TO_G1"
+}
+
// bls12381MapG2 implements EIP-2537 MapG2 precompile.
type bls12381MapG2 struct{}
@@ -1325,6 +1527,10 @@ func (c *bls12381MapG2) Run(input []byte) ([]byte, error) {
return encodePointG2(&r), nil
}
+func (c *bls12381MapG2) Name() string {
+ return "BLS12_MAP_FP2_TO_G2"
+}
+
// kzgPointEvaluation implements the EIP-4844 point evaluation precompile.
type kzgPointEvaluation struct{}
@@ -1381,6 +1587,10 @@ func (b *kzgPointEvaluation) Run(input []byte) ([]byte, error) {
return common.Hex2Bytes(blobPrecompileReturnValue), nil
}
+func (b *kzgPointEvaluation) Name() string {
+ return "KZG_POINT_EVALUATION"
+}
+
// kZGToVersionedHash implements kzg_to_versioned_hash from EIP-4844
func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash {
h := sha256.Sum256(kzg[:])
@@ -1430,3 +1640,7 @@ func (c *p256Verify) Run(input []byte) ([]byte, error) {
}
return nil, nil
}
+
+func (c *p256Verify) Name() string {
+ return "P256VERIFY"
+}
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index 74f2f55bc6..2596c52298 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -74,6 +74,11 @@ var allPrecompiles = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{0x0b}): &p256Verify{},
common.BytesToAddress([]byte{0x01, 0x00}): &p256VerifyFjord{},
+
+ common.BytesToAddress([]byte{0x2f, 0x08}): &bn256PairingJovian{},
+ common.BytesToAddress([]byte{0x2f, 0x0e}): &bls12381PairingJovian{},
+ common.BytesToAddress([]byte{0x2f, 0x0b}): &bls12381G1MultiExpJovian{},
+ common.BytesToAddress([]byte{0x2f, 0x0d}): &bls12381G2MultiExpJovian{},
}
// EIP-152 test vectors
diff --git a/core/vm/eips.go b/core/vm/eips.go
index 7764bd20b6..10ca1fe9ab 100644
--- a/core/vm/eips.go
+++ b/core/vm/eips.go
@@ -89,8 +89,8 @@ func enable1884(jt *JumpTable) {
}
}
-func opSelfBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
+func opSelfBalance(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ balance := evm.StateDB.GetBalance(scope.Contract.Address())
scope.Stack.push(balance)
return nil, nil
}
@@ -108,8 +108,8 @@ func enable1344(jt *JumpTable) {
}
// opChainID implements CHAINID opcode
-func opChainID(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- chainId, _ := uint256.FromBig(interpreter.evm.chainConfig.ChainID)
+func opChainID(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ chainId, _ := uint256.FromBig(evm.chainConfig.ChainID)
scope.Stack.push(chainId)
return nil, nil
}
@@ -199,28 +199,28 @@ func enable1153(jt *JumpTable) {
}
// opTload implements TLOAD opcode
-func opTload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opTload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
loc := scope.Stack.peek()
hash := common.Hash(loc.Bytes32())
- val := interpreter.evm.StateDB.GetTransientState(scope.Contract.Address(), hash)
+ val := evm.StateDB.GetTransientState(scope.Contract.Address(), hash)
loc.SetBytes(val.Bytes())
return nil, nil
}
// opTstore implements TSTORE opcode
-func opTstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opTstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
loc := scope.Stack.pop()
val := scope.Stack.pop()
- interpreter.evm.StateDB.SetTransientState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())
+ evm.StateDB.SetTransientState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())
return nil, nil
}
// opBaseFee implements BASEFEE opcode
-func opBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- baseFee, _ := uint256.FromBig(interpreter.evm.Context.BaseFee)
+func opBaseFee(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ baseFee, _ := uint256.FromBig(evm.Context.BaseFee)
scope.Stack.push(baseFee)
return nil, nil
}
@@ -237,7 +237,7 @@ func enable3855(jt *JumpTable) {
}
// opPush0 implements the PUSH0 opcode
-func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPush0(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int))
return nil, nil
}
@@ -263,7 +263,7 @@ func enable5656(jt *JumpTable) {
}
// opMcopy implements the MCOPY opcode (https://eips.ethereum.org/EIPS/eip-5656)
-func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMcopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
dst = scope.Stack.pop()
src = scope.Stack.pop()
@@ -276,10 +276,10 @@ func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
}
// opBlobHash implements the BLOBHASH opcode
-func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opBlobHash(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
index := scope.Stack.peek()
- if index.LtUint64(uint64(len(interpreter.evm.TxContext.BlobHashes))) {
- blobHash := interpreter.evm.TxContext.BlobHashes[index.Uint64()]
+ if index.LtUint64(uint64(len(evm.TxContext.BlobHashes))) {
+ blobHash := evm.TxContext.BlobHashes[index.Uint64()]
index.SetBytes32(blobHash[:])
} else {
index.Clear()
@@ -288,14 +288,14 @@ func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
}
// opBlobBaseFee implements BLOBBASEFEE opcode
-func opBlobBaseFee(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- blobBaseFee, _ := uint256.FromBig(interpreter.evm.Context.BlobBaseFee)
+func opBlobBaseFee(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ blobBaseFee, _ := uint256.FromBig(evm.Context.BlobBaseFee)
scope.Stack.push(blobBaseFee)
return nil, nil
}
// opCLZ implements the CLZ opcode (count leading zero bytes)
-func opCLZ(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCLZ(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek()
x.SetUint64(256 - uint64(x.BitLen()))
return nil, nil
@@ -342,7 +342,7 @@ func enable6780(jt *JumpTable) {
}
}
-func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opExtCodeCopyEIP4762(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
stack = scope.Stack
a = stack.pop()
@@ -355,10 +355,10 @@ func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeC
uint64CodeOffset = math.MaxUint64
}
addr := common.Address(a.Bytes20())
- code := interpreter.evm.StateDB.GetCode(addr)
+ code := evm.StateDB.GetCode(addr)
paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64())
- consumed, wanted := interpreter.evm.AccessEvents.CodeChunksRangeGas(addr, copyOffset, nonPaddedCopyLength, uint64(len(code)), false, scope.Contract.Gas)
- scope.Contract.UseGas(consumed, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified)
+ consumed, wanted := evm.AccessEvents.CodeChunksRangeGas(addr, copyOffset, nonPaddedCopyLength, uint64(len(code)), false, scope.Contract.Gas)
+ scope.Contract.UseGas(consumed, evm.Config.Tracer, tracing.GasChangeUnspecified)
if consumed < wanted {
return nil, ErrOutOfGas
}
@@ -370,7 +370,7 @@ func opExtCodeCopyEIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeC
// opPush1EIP4762 handles the special case of PUSH1 opcode for EIP-4762, which
// need not worry about the adjusted bound logic when adding the PUSHDATA to
// the list of access events.
-func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPush1EIP4762(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
codeLen = uint64(len(scope.Contract.Code))
integer = new(uint256.Int)
@@ -383,8 +383,8 @@ func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
// touch next chunk if PUSH1 is at the boundary. if so, *pc has
// advanced past this boundary.
contractAddr := scope.Contract.Address()
- consumed, wanted := interpreter.evm.AccessEvents.CodeChunksRangeGas(contractAddr, *pc+1, uint64(1), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas)
- scope.Contract.UseGas(wanted, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified)
+ consumed, wanted := evm.AccessEvents.CodeChunksRangeGas(contractAddr, *pc+1, uint64(1), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas)
+ scope.Contract.UseGas(wanted, evm.Config.Tracer, tracing.GasChangeUnspecified)
if consumed < wanted {
return nil, ErrOutOfGas
}
@@ -396,7 +396,7 @@ func opPush1EIP4762(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
}
func makePushEIP4762(size uint64, pushByteSize int) executionFunc {
- return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
codeLen = len(scope.Contract.Code)
start = min(codeLen, int(*pc+1))
@@ -411,8 +411,8 @@ func makePushEIP4762(size uint64, pushByteSize int) executionFunc {
if !scope.Contract.IsDeployment && !scope.Contract.IsSystemCall {
contractAddr := scope.Contract.Address()
- consumed, wanted := interpreter.evm.AccessEvents.CodeChunksRangeGas(contractAddr, uint64(start), uint64(pushByteSize), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas)
- scope.Contract.UseGas(consumed, interpreter.evm.Config.Tracer, tracing.GasChangeUnspecified)
+ consumed, wanted := evm.AccessEvents.CodeChunksRangeGas(contractAddr, uint64(start), uint64(pushByteSize), uint64(len(scope.Contract.Code)), false, scope.Contract.Gas)
+ scope.Contract.UseGas(consumed, evm.Config.Tracer, tracing.GasChangeUnspecified)
if consumed < wanted {
return nil, ErrOutOfGas
}
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 8399d64a2d..2b1d940257 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
)
@@ -103,6 +104,9 @@ type EVM struct {
// StateDB gives access to the underlying state
StateDB StateDB
+ // table holds the opcode specific handlers
+ table *JumpTable
+
// depth is the current call stack
depth int
@@ -115,10 +119,6 @@ type EVM struct {
// virtual machine configuration options used to initialise the evm
Config Config
- // global (to this context) ethereum virtual machine used throughout
- // the execution of the tx
- interpreter *EVMInterpreter
-
// abort is used to abort the EVM calling operations
abort atomic.Bool
@@ -132,6 +132,12 @@ type EVM struct {
// jumpDests stores results of JUMPDEST analysis.
jumpDests JumpDestCache
+
+ hasher crypto.KeccakState // Keccak256 hasher instance shared across opcodes
+ hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes
+
+ readOnly bool // Whether to throw on stateful modifications
+ returnData []byte // Last CALL's return data for subsequent reuse
}
// NewEVM constructs an EVM instance with the supplied block context, state
@@ -146,9 +152,57 @@ func NewEVM(blockCtx BlockContext, statedb StateDB, chainConfig *params.ChainCon
chainConfig: chainConfig,
chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time),
jumpDests: newMapJumpDests(),
+ hasher: crypto.NewKeccakState(),
}
evm.precompiles = activePrecompiledContracts(evm.chainRules)
- evm.interpreter = NewEVMInterpreter(evm)
+
+ switch {
+ case evm.chainRules.IsOsaka:
+ evm.table = &osakaInstructionSet
+ case evm.chainRules.IsVerkle:
+ // TODO replace with proper instruction set when fork is specified
+ evm.table = &verkleInstructionSet
+ case evm.chainRules.IsPrague:
+ evm.table = &pragueInstructionSet
+ case evm.chainRules.IsCancun:
+ evm.table = &cancunInstructionSet
+ case evm.chainRules.IsShanghai:
+ evm.table = &shanghaiInstructionSet
+ case evm.chainRules.IsMerge:
+ evm.table = &mergeInstructionSet
+ case evm.chainRules.IsLondon:
+ evm.table = &londonInstructionSet
+ case evm.chainRules.IsBerlin:
+ evm.table = &berlinInstructionSet
+ case evm.chainRules.IsIstanbul:
+ evm.table = &istanbulInstructionSet
+ case evm.chainRules.IsConstantinople:
+ evm.table = &constantinopleInstructionSet
+ case evm.chainRules.IsByzantium:
+ evm.table = &byzantiumInstructionSet
+ case evm.chainRules.IsEIP158:
+ evm.table = &spuriousDragonInstructionSet
+ case evm.chainRules.IsEIP150:
+ evm.table = &tangerineWhistleInstructionSet
+ case evm.chainRules.IsHomestead:
+ evm.table = &homesteadInstructionSet
+ default:
+ evm.table = &frontierInstructionSet
+ }
+ var extraEips []int
+ if len(evm.Config.ExtraEips) > 0 {
+ // Deep-copy jumptable to prevent modification of opcodes in other tables
+ evm.table = copyJumpTable(evm.table)
+ }
+ for _, eip := range evm.Config.ExtraEips {
+ if err := EnableEIP(eip, evm.table); err != nil {
+ // Disable it, so caller can check if it's activated or not
+ log.Error("EIP activation failed", "eip", eip, "error", err)
+ } else {
+ extraEips = append(extraEips, eip)
+ }
+ }
+ evm.Config.ExtraEips = extraEips
return evm
}
@@ -184,11 +238,6 @@ func (evm *EVM) Cancelled() bool {
return evm.abort.Load()
}
-// Interpreter returns the current interpreter
-func (evm *EVM) Interpreter() *EVMInterpreter {
- return evm.interpreter
-}
-
// OP-Stack addition
func (evm *EVM) maybeOverrideCaller(caller common.Address) common.Address {
if evm.Config.CallerOverride != nil {
@@ -262,7 +311,7 @@ func (evm *EVM) Call(caller common.Address, addr common.Address, input []byte, g
contract := NewContract(caller, addr, value, gas, evm.jumpDests)
contract.IsSystemCall = isSystemCall(caller)
contract.SetCallCode(evm.resolveCodeHash(addr), code)
- ret, err = evm.interpreter.Run(contract, input, false)
+ ret, err = evm.Run(contract, input, false)
gas = contract.Gas
}
}
@@ -322,7 +371,7 @@ func (evm *EVM) CallCode(caller common.Address, addr common.Address, input []byt
// The contract is a scoped environment for this execution context only.
contract := NewContract(caller, caller, value, gas, evm.jumpDests)
contract.SetCallCode(evm.resolveCodeHash(addr), evm.resolveCode(addr))
- ret, err = evm.interpreter.Run(contract, input, false)
+ ret, err = evm.Run(contract, input, false)
gas = contract.Gas
}
if err != nil {
@@ -367,7 +416,7 @@ func (evm *EVM) DelegateCall(originCaller common.Address, caller common.Address,
// Note: The value refers to the original value from the parent call.
contract := NewContract(originCaller, caller, value, gas, evm.jumpDests)
contract.SetCallCode(evm.resolveCodeHash(addr), evm.resolveCode(addr))
- ret, err = evm.interpreter.Run(contract, input, false)
+ ret, err = evm.Run(contract, input, false)
gas = contract.Gas
}
if err != nil {
@@ -423,7 +472,7 @@ func (evm *EVM) StaticCall(caller common.Address, addr common.Address, input []b
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in Homestead this also counts for code storage gas errors.
- ret, err = evm.interpreter.Run(contract, input, true)
+ ret, err = evm.Run(contract, input, true)
gas = contract.Gas
}
if err != nil {
@@ -544,7 +593,7 @@ func (evm *EVM) create(caller common.Address, code []byte, gas uint64, value *ui
// initNewContract runs a new contract's creation code, performs checks on the
// resulting code that is to be deployed, and consumes necessary gas.
func (evm *EVM) initNewContract(contract *Contract, address common.Address) ([]byte, error) {
- ret, err := evm.interpreter.Run(contract, nil, false)
+ ret, err := evm.Run(contract, nil, false)
if err != nil {
return ret, err
}
@@ -589,7 +638,7 @@ func (evm *EVM) Create(caller common.Address, code []byte, gas uint64, value *ui
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller common.Address, code []byte, gas uint64, endowment *uint256.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
caller = evm.maybeOverrideCaller(caller)
- inithash := crypto.HashData(evm.interpreter.hasher, code)
+ inithash := crypto.HashData(evm.hasher, code)
contractAddr = crypto.CreateAddress2(caller, salt.Bytes32(), inithash[:])
return evm.create(caller, code, gas, endowment, contractAddr, CREATE2)
}
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 63bb6d2d51..44d3e81a9c 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -26,67 +26,67 @@ import (
"github.com/holiman/uint256"
)
-func opAdd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opAdd(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Add(&x, y)
return nil, nil
}
-func opSub(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSub(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Sub(&x, y)
return nil, nil
}
-func opMul(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMul(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Mul(&x, y)
return nil, nil
}
-func opDiv(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opDiv(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Div(&x, y)
return nil, nil
}
-func opSdiv(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSdiv(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.SDiv(&x, y)
return nil, nil
}
-func opMod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Mod(&x, y)
return nil, nil
}
-func opSmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSmod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.SMod(&x, y)
return nil, nil
}
-func opExp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opExp(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
base, exponent := scope.Stack.pop(), scope.Stack.peek()
exponent.Exp(&base, exponent)
return nil, nil
}
-func opSignExtend(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSignExtend(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
back, num := scope.Stack.pop(), scope.Stack.peek()
num.ExtendSign(num, &back)
return nil, nil
}
-func opNot(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opNot(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek()
x.Not(x)
return nil, nil
}
-func opLt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opLt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Lt(y) {
y.SetOne()
@@ -96,7 +96,7 @@ func opLt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte,
return nil, nil
}
-func opGt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opGt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Gt(y) {
y.SetOne()
@@ -106,7 +106,7 @@ func opGt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte,
return nil, nil
}
-func opSlt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSlt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Slt(y) {
y.SetOne()
@@ -116,7 +116,7 @@ func opSlt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
return nil, nil
}
-func opSgt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSgt(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Sgt(y) {
y.SetOne()
@@ -126,7 +126,7 @@ func opSgt(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
return nil, nil
}
-func opEq(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opEq(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
if x.Eq(y) {
y.SetOne()
@@ -136,7 +136,7 @@ func opEq(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte,
return nil, nil
}
-func opIszero(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opIszero(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek()
if x.IsZero() {
x.SetOne()
@@ -146,37 +146,37 @@ func opIszero(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
return nil, nil
}
-func opAnd(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opAnd(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.And(&x, y)
return nil, nil
}
-func opOr(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opOr(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Or(&x, y)
return nil, nil
}
-func opXor(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opXor(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y := scope.Stack.pop(), scope.Stack.peek()
y.Xor(&x, y)
return nil, nil
}
-func opByte(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opByte(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
th, val := scope.Stack.pop(), scope.Stack.peek()
val.Byte(&th)
return nil, nil
}
-func opAddmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opAddmod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek()
z.AddMod(&x, &y, z)
return nil, nil
}
-func opMulmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMulmod(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x, y, z := scope.Stack.pop(), scope.Stack.pop(), scope.Stack.peek()
z.MulMod(&x, &y, z)
return nil, nil
@@ -185,7 +185,7 @@ func opMulmod(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
// opSHL implements Shift Left
// The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the left by arg1 number of bits.
-func opSHL(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSHL(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
// Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
shift, value := scope.Stack.pop(), scope.Stack.peek()
if shift.LtUint64(256) {
@@ -199,7 +199,7 @@ func opSHL(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
// opSHR implements Logical Shift Right
// The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill.
-func opSHR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSHR(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
// Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
shift, value := scope.Stack.pop(), scope.Stack.peek()
if shift.LtUint64(256) {
@@ -213,7 +213,7 @@ func opSHR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
// opSAR implements Arithmetic Shift Right
// The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension.
-func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSAR(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
shift, value := scope.Stack.pop(), scope.Stack.peek()
if shift.GtUint64(256) {
if value.Sign() >= 0 {
@@ -229,50 +229,49 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
return nil, nil
}
-func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opKeccak256(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
offset, size := scope.Stack.pop(), scope.Stack.peek()
data := scope.Memory.GetPtr(offset.Uint64(), size.Uint64())
- interpreter.hasher.Reset()
- interpreter.hasher.Write(data)
- interpreter.hasher.Read(interpreter.hasherBuf[:])
+ evm.hasher.Reset()
+ evm.hasher.Write(data)
+ evm.hasher.Read(evm.hasherBuf[:])
- evm := interpreter.evm
if evm.Config.EnablePreimageRecording {
- evm.StateDB.AddPreimage(interpreter.hasherBuf, data)
+ evm.StateDB.AddPreimage(evm.hasherBuf, data)
}
- size.SetBytes(interpreter.hasherBuf[:])
+ size.SetBytes(evm.hasherBuf[:])
return nil, nil
}
-func opAddress(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opAddress(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Address().Bytes()))
return nil, nil
}
-func opBalance(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opBalance(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek()
address := common.Address(slot.Bytes20())
- slot.Set(interpreter.evm.StateDB.GetBalance(address))
+ slot.Set(evm.StateDB.GetBalance(address))
return nil, nil
}
-func opOrigin(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Origin.Bytes()))
+func opOrigin(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int).SetBytes(evm.Origin.Bytes()))
return nil, nil
}
-func opCaller(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCaller(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetBytes(scope.Contract.Caller().Bytes()))
return nil, nil
}
-func opCallValue(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCallValue(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(scope.Contract.value)
return nil, nil
}
-func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCallDataLoad(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
x := scope.Stack.peek()
if offset, overflow := x.Uint64WithOverflow(); !overflow {
data := getData(scope.Contract.Input, offset, 32)
@@ -283,12 +282,12 @@ func opCallDataLoad(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
return nil, nil
}
-func opCallDataSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCallDataSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Input))))
return nil, nil
}
-func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCallDataCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
memOffset = scope.Stack.pop()
dataOffset = scope.Stack.pop()
@@ -306,12 +305,12 @@ func opCallDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
return nil, nil
}
-func opReturnDataSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(interpreter.returnData))))
+func opReturnDataSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(evm.returnData))))
return nil, nil
}
-func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opReturnDataCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
memOffset = scope.Stack.pop()
dataOffset = scope.Stack.pop()
@@ -326,25 +325,25 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte
var end = dataOffset
end.Add(&dataOffset, &length)
end64, overflow := end.Uint64WithOverflow()
- if overflow || uint64(len(interpreter.returnData)) < end64 {
+ if overflow || uint64(len(evm.returnData)) < end64 {
return nil, ErrReturnDataOutOfBounds
}
- scope.Memory.Set(memOffset.Uint64(), length.Uint64(), interpreter.returnData[offset64:end64])
+ scope.Memory.Set(memOffset.Uint64(), length.Uint64(), evm.returnData[offset64:end64])
return nil, nil
}
-func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opExtCodeSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek()
- slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())))
+ slot.SetUint64(uint64(evm.StateDB.GetCodeSize(slot.Bytes20())))
return nil, nil
}
-func opCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCodeSize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Code))))
return nil, nil
}
-func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCodeCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
memOffset = scope.Stack.pop()
codeOffset = scope.Stack.pop()
@@ -360,7 +359,7 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
return nil, nil
}
-func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opExtCodeCopy(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
stack = scope.Stack
a = stack.pop()
@@ -373,7 +372,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
uint64CodeOffset = math.MaxUint64
}
addr := common.Address(a.Bytes20())
- code := interpreter.evm.StateDB.GetCode(addr)
+ code := evm.StateDB.GetCode(addr)
codeCopy := getData(code, uint64CodeOffset, length.Uint64())
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
@@ -406,24 +405,24 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
//
// 6. Caller tries to get the code hash for an account which is marked as deleted, this
// account should be regarded as a non-existent account and zero should be returned.
-func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opExtCodeHash(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
slot := scope.Stack.peek()
address := common.Address(slot.Bytes20())
- if interpreter.evm.StateDB.Empty(address) {
+ if evm.StateDB.Empty(address) {
slot.Clear()
} else {
- slot.SetBytes(interpreter.evm.StateDB.GetCodeHash(address).Bytes())
+ slot.SetBytes(evm.StateDB.GetCodeHash(address).Bytes())
}
return nil, nil
}
-func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v, _ := uint256.FromBig(interpreter.evm.GasPrice)
+func opGasprice(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ v, _ := uint256.FromBig(evm.GasPrice)
scope.Stack.push(v)
return nil, nil
}
-func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opBlockhash(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
num := scope.Stack.peek()
num64, overflow := num.Uint64WithOverflow()
if overflow {
@@ -432,18 +431,18 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) (
}
var upper, lower uint64
- upper = interpreter.evm.Context.BlockNumber.Uint64()
+ upper = evm.Context.BlockNumber.Uint64()
if upper < 257 {
lower = 0
} else {
lower = upper - 256
}
if num64 >= lower && num64 < upper {
- res := interpreter.evm.Context.GetHash(num64)
- if witness := interpreter.evm.StateDB.Witness(); witness != nil {
+ res := evm.Context.GetHash(num64)
+ if witness := evm.StateDB.Witness(); witness != nil {
witness.AddBlockHash(num64)
}
- if tracer := interpreter.evm.Config.Tracer; tracer != nil && tracer.OnBlockHashRead != nil {
+ if tracer := evm.Config.Tracer; tracer != nil && tracer.OnBlockHashRead != nil {
tracer.OnBlockHashRead(num64, res)
}
num.SetBytes(res[:])
@@ -453,83 +452,83 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) (
return nil, nil
}
-func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes()))
+func opCoinbase(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int).SetBytes(evm.Context.Coinbase.Bytes()))
return nil, nil
}
-func opTimestamp(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.Time))
+func opTimestamp(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int).SetUint64(evm.Context.Time))
return nil, nil
}
-func opNumber(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v, _ := uint256.FromBig(interpreter.evm.Context.BlockNumber)
+func opNumber(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ v, _ := uint256.FromBig(evm.Context.BlockNumber)
scope.Stack.push(v)
return nil, nil
}
-func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v, _ := uint256.FromBig(interpreter.evm.Context.Difficulty)
+func opDifficulty(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ v, _ := uint256.FromBig(evm.Context.Difficulty)
scope.Stack.push(v)
return nil, nil
}
-func opRandom(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- v := new(uint256.Int).SetBytes(interpreter.evm.Context.Random.Bytes())
+func opRandom(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ v := new(uint256.Int).SetBytes(evm.Context.Random.Bytes())
scope.Stack.push(v)
return nil, nil
}
-func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit))
+func opGasLimit(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.push(new(uint256.Int).SetUint64(evm.Context.GasLimit))
return nil, nil
}
-func opPop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPop(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.pop()
return nil, nil
}
-func opMload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
v := scope.Stack.peek()
offset := v.Uint64()
v.SetBytes(scope.Memory.GetPtr(offset, 32))
return nil, nil
}
-func opMstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
mStart, val := scope.Stack.pop(), scope.Stack.pop()
scope.Memory.Set32(mStart.Uint64(), &val)
return nil, nil
}
-func opMstore8(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMstore8(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
off, val := scope.Stack.pop(), scope.Stack.pop()
scope.Memory.store[off.Uint64()] = byte(val.Uint64())
return nil, nil
}
-func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSload(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
loc := scope.Stack.peek()
hash := common.Hash(loc.Bytes32())
- val := interpreter.evm.StateDB.GetState(scope.Contract.Address(), hash)
+ val := evm.StateDB.GetState(scope.Contract.Address(), hash)
loc.SetBytes(val.Bytes())
return nil, nil
}
-func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opSstore(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
loc := scope.Stack.pop()
val := scope.Stack.pop()
- interpreter.evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())
+ evm.StateDB.SetState(scope.Contract.Address(), loc.Bytes32(), val.Bytes32())
return nil, nil
}
-func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.evm.abort.Load() {
+func opJump(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.abort.Load() {
return nil, errStopToken
}
pos := scope.Stack.pop()
@@ -540,8 +539,8 @@ func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
return nil, nil
}
-func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.evm.abort.Load() {
+func opJumpi(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.abort.Load() {
return nil, errStopToken
}
pos, cond := scope.Stack.pop(), scope.Stack.pop()
@@ -554,107 +553,107 @@ func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
return nil, nil
}
-func opJumpdest(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opJumpdest(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
return nil, nil
}
-func opPc(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPc(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(*pc))
return nil, nil
}
-func opMsize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opMsize(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(uint64(scope.Memory.Len())))
return nil, nil
}
-func opGas(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opGas(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.push(new(uint256.Int).SetUint64(scope.Contract.Gas))
return nil, nil
}
-func opSwap1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap1(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap1()
return nil, nil
}
-func opSwap2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap2(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap2()
return nil, nil
}
-func opSwap3(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap3(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap3()
return nil, nil
}
-func opSwap4(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap4(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap4()
return nil, nil
}
-func opSwap5(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap5(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap5()
return nil, nil
}
-func opSwap6(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap6(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap6()
return nil, nil
}
-func opSwap7(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap7(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap7()
return nil, nil
}
-func opSwap8(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap8(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap8()
return nil, nil
}
-func opSwap9(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap9(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap9()
return nil, nil
}
-func opSwap10(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap10(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap10()
return nil, nil
}
-func opSwap11(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap11(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap11()
return nil, nil
}
-func opSwap12(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap12(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap12()
return nil, nil
}
-func opSwap13(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap13(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap13()
return nil, nil
}
-func opSwap14(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap14(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap14()
return nil, nil
}
-func opSwap15(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap15(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap15()
return nil, nil
}
-func opSwap16(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opSwap16(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
scope.Stack.swap16()
return nil, nil
}
-func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opCreate(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
var (
@@ -663,21 +662,21 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
input = scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
gas = scope.Contract.Gas
)
- if interpreter.evm.chainRules.IsEIP150 {
+ if evm.chainRules.IsEIP150 {
gas -= gas / 64
}
// reuse size int for stackvalue
stackvalue := size
- scope.Contract.UseGas(gas, interpreter.evm.Config.Tracer, tracing.GasChangeCallContractCreation)
+ scope.Contract.UseGas(gas, evm.Config.Tracer, tracing.GasChangeCallContractCreation)
- res, addr, returnGas, suberr := interpreter.evm.Create(scope.Contract.Address(), input, gas, &value)
+ res, addr, returnGas, suberr := evm.Create(scope.Contract.Address(), input, gas, &value)
// Push item on the stack based on the returned error. If the ruleset is
// homestead we must check for CodeStoreOutOfGasError (homestead only
// rule) and treat as an error, if the ruleset is frontier we must
// ignore this error and pretend the operation was successful.
- if interpreter.evm.chainRules.IsHomestead && suberr == ErrCodeStoreOutOfGas {
+ if evm.chainRules.IsHomestead && suberr == ErrCodeStoreOutOfGas {
stackvalue.Clear()
} else if suberr != nil && suberr != ErrCodeStoreOutOfGas {
stackvalue.Clear()
@@ -686,18 +685,18 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
}
scope.Stack.push(&stackvalue)
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
if suberr == ErrExecutionReverted {
- interpreter.returnData = res // set REVERT data to return data buffer
+ evm.returnData = res // set REVERT data to return data buffer
return res, nil
}
- interpreter.returnData = nil // clear dirty return data buffer
+ evm.returnData = nil // clear dirty return data buffer
return nil, nil
}
-func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opCreate2(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
var (
@@ -710,10 +709,10 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
// Apply EIP150
gas -= gas / 64
- scope.Contract.UseGas(gas, interpreter.evm.Config.Tracer, tracing.GasChangeCallContractCreation2)
+ scope.Contract.UseGas(gas, evm.Config.Tracer, tracing.GasChangeCallContractCreation2)
// reuse size int for stackvalue
stackvalue := size
- res, addr, returnGas, suberr := interpreter.evm.Create2(scope.Contract.Address(), input, gas,
+ res, addr, returnGas, suberr := evm.Create2(scope.Contract.Address(), input, gas,
&endowment, &salt)
// Push item on the stack based on the returned error.
if suberr != nil {
@@ -722,35 +721,35 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
stackvalue.SetBytes(addr.Bytes())
}
scope.Stack.push(&stackvalue)
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
if suberr == ErrExecutionReverted {
- interpreter.returnData = res // set REVERT data to return data buffer
+ evm.returnData = res // set REVERT data to return data buffer
return res, nil
}
- interpreter.returnData = nil // clear dirty return data buffer
+ evm.returnData = nil // clear dirty return data buffer
return nil, nil
}
-func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
stack := scope.Stack
- // Pop gas. The actual gas in interpreter.evm.callGasTemp.
+ // Pop gas. The actual gas in evm.callGasTemp.
// We can use this as a temporary value
temp := stack.pop()
- gas := interpreter.evm.callGasTemp
+ gas := evm.callGasTemp
// Pop other call parameters.
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20())
// Get the arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
- if interpreter.readOnly && !value.IsZero() {
+ if evm.readOnly && !value.IsZero() {
return nil, ErrWriteProtection
}
if !value.IsZero() {
gas += params.CallStipend
}
- ret, returnGas, err := interpreter.evm.Call(scope.Contract.Address(), toAddr, args, gas, &value)
+ ret, returnGas, err := evm.Call(scope.Contract.Address(), toAddr, args, gas, &value)
if err != nil {
temp.Clear()
@@ -762,18 +761,18 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
- interpreter.returnData = ret
+ evm.returnData = ret
return ret, nil
}
-func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- // Pop gas. The actual gas is in interpreter.evm.callGasTemp.
+func opCallCode(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ // Pop gas. The actual gas is in evm.callGasTemp.
stack := scope.Stack
// We use it as a temporary value
temp := stack.pop()
- gas := interpreter.evm.callGasTemp
+ gas := evm.callGasTemp
// Pop other call parameters.
addr, value, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20())
@@ -784,7 +783,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
gas += params.CallStipend
}
- ret, returnGas, err := interpreter.evm.CallCode(scope.Contract.Address(), toAddr, args, gas, &value)
+ ret, returnGas, err := evm.CallCode(scope.Contract.Address(), toAddr, args, gas, &value)
if err != nil {
temp.Clear()
} else {
@@ -795,25 +794,25 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
- interpreter.returnData = ret
+ evm.returnData = ret
return ret, nil
}
-func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opDelegateCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
stack := scope.Stack
- // Pop gas. The actual gas is in interpreter.evm.callGasTemp.
+ // Pop gas. The actual gas is in evm.callGasTemp.
// We use it as a temporary value
temp := stack.pop()
- gas := interpreter.evm.callGasTemp
+ gas := evm.callGasTemp
// Pop other call parameters.
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20())
// Get arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
- ret, returnGas, err := interpreter.evm.DelegateCall(scope.Contract.Caller(), scope.Contract.Address(), toAddr, args, gas, scope.Contract.value)
+ ret, returnGas, err := evm.DelegateCall(scope.Contract.Caller(), scope.Contract.Address(), toAddr, args, gas, scope.Contract.value)
if err != nil {
temp.Clear()
} else {
@@ -824,25 +823,25 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
- interpreter.returnData = ret
+ evm.returnData = ret
return ret, nil
}
-func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- // Pop gas. The actual gas is in interpreter.evm.callGasTemp.
+func opStaticCall(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ // Pop gas. The actual gas is in evm.callGasTemp.
stack := scope.Stack
// We use it as a temporary value
temp := stack.pop()
- gas := interpreter.evm.callGasTemp
+ gas := evm.callGasTemp
// Pop other call parameters.
addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.Address(addr.Bytes20())
// Get arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
- ret, returnGas, err := interpreter.evm.StaticCall(scope.Contract.Address(), toAddr, args, gas)
+ ret, returnGas, err := evm.StaticCall(scope.Contract.Address(), toAddr, args, gas)
if err != nil {
temp.Clear()
} else {
@@ -853,69 +852,69 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret)
}
- scope.Contract.RefundGas(returnGas, interpreter.evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ scope.Contract.RefundGas(returnGas, evm.Config.Tracer, tracing.GasChangeCallLeftOverRefunded)
- interpreter.returnData = ret
+ evm.returnData = ret
return ret, nil
}
-func opReturn(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opReturn(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
offset, size := scope.Stack.pop(), scope.Stack.pop()
ret := scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
return ret, errStopToken
}
-func opRevert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opRevert(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
offset, size := scope.Stack.pop(), scope.Stack.pop()
ret := scope.Memory.GetCopy(offset.Uint64(), size.Uint64())
- interpreter.returnData = ret
+ evm.returnData = ret
return ret, ErrExecutionReverted
}
-func opUndefined(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opUndefined(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
return nil, &ErrInvalidOpCode{opcode: OpCode(scope.Contract.Code[*pc])}
}
-func opStop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opStop(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
return nil, errStopToken
}
-func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opSelfdestruct(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
beneficiary := scope.Stack.pop()
- balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
- interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
- interpreter.evm.StateDB.SelfDestruct(scope.Contract.Address())
- if tracer := interpreter.evm.Config.Tracer; tracer != nil {
+ balance := evm.StateDB.GetBalance(scope.Contract.Address())
+ evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
+ evm.StateDB.SelfDestruct(scope.Contract.Address())
+ if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnEnter != nil {
- tracer.OnEnter(interpreter.evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
+ tracer.OnEnter(evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
}
if tracer.OnExit != nil {
- tracer.OnExit(interpreter.evm.depth, []byte{}, 0, nil, false)
+ tracer.OnExit(evm.depth, []byte{}, 0, nil, false)
}
}
return nil, errStopToken
}
-func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+func opSelfdestruct6780(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
beneficiary := scope.Stack.pop()
- balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
- interpreter.evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct)
- interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
- interpreter.evm.StateDB.SelfDestruct6780(scope.Contract.Address())
- if tracer := interpreter.evm.Config.Tracer; tracer != nil {
+ balance := evm.StateDB.GetBalance(scope.Contract.Address())
+ evm.StateDB.SubBalance(scope.Contract.Address(), balance, tracing.BalanceDecreaseSelfdestruct)
+ evm.StateDB.AddBalance(beneficiary.Bytes20(), balance, tracing.BalanceIncreaseSelfdestruct)
+ evm.StateDB.SelfDestruct6780(scope.Contract.Address())
+ if tracer := evm.Config.Tracer; tracer != nil {
if tracer.OnEnter != nil {
- tracer.OnEnter(interpreter.evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
+ tracer.OnEnter(evm.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance.ToBig())
}
if tracer.OnExit != nil {
- tracer.OnExit(interpreter.evm.depth, []byte{}, 0, nil, false)
+ tracer.OnExit(evm.depth, []byte{}, 0, nil, false)
}
}
return nil, errStopToken
@@ -925,8 +924,8 @@ func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCon
// make log instruction function
func makeLog(size int) executionFunc {
- return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- if interpreter.readOnly {
+ return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ if evm.readOnly {
return nil, ErrWriteProtection
}
topics := make([]common.Hash, size)
@@ -938,13 +937,13 @@ func makeLog(size int) executionFunc {
}
d := scope.Memory.GetCopy(mStart.Uint64(), mSize.Uint64())
- interpreter.evm.StateDB.AddLog(&types.Log{
+ evm.StateDB.AddLog(&types.Log{
Address: scope.Contract.Address(),
Topics: topics,
Data: d,
// This is a non-consensus field, but assigned here because
// core/state doesn't know the current block number.
- BlockNumber: interpreter.evm.Context.BlockNumber.Uint64(),
+ BlockNumber: evm.Context.BlockNumber.Uint64(),
})
return nil, nil
@@ -952,7 +951,7 @@ func makeLog(size int) executionFunc {
}
// opPush1 is a specialized version of pushN
-func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPush1(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
codeLen = uint64(len(scope.Contract.Code))
integer = new(uint256.Int)
@@ -967,7 +966,7 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
}
// opPush2 is a specialized version of pushN
-func opPush2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+func opPush2(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
codeLen = uint64(len(scope.Contract.Code))
integer = new(uint256.Int)
@@ -985,7 +984,7 @@ func opPush2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
// make push instruction function
func makePush(size uint64, pushByteSize int) executionFunc {
- return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
+ return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
var (
codeLen = len(scope.Contract.Code)
start = min(codeLen, int(*pc+1))
@@ -1004,9 +1003,9 @@ func makePush(size uint64, pushByteSize int) executionFunc {
}
// make dup instruction function
-func makeDup(size int64) executionFunc {
- return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- scope.Stack.dup(int(size))
+func makeDup(size int) executionFunc {
+ return func(pc *uint64, evm *EVM, scope *ScopeContext) ([]byte, error) {
+ scope.Stack.dup(size)
return nil, nil
}
}
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index 8a82de5d8b..cd31829a7e 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -107,7 +107,7 @@ func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFu
expected := new(uint256.Int).SetBytes(common.Hex2Bytes(test.Expected))
stack.push(x)
stack.push(y)
- opFn(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
+ opFn(&pc, evm, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", name, len(stack.data))
}
@@ -221,7 +221,7 @@ func TestAddMod(t *testing.T) {
stack.push(z)
stack.push(y)
stack.push(x)
- opAddmod(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
+ opAddmod(&pc, evm, &ScopeContext{nil, stack, nil})
actual := stack.pop()
if actual.Cmp(expected) != 0 {
t.Errorf("Testcase %d, expected %x, got %x", i, expected, actual)
@@ -247,7 +247,7 @@ func TestWriteExpectedValues(t *testing.T) {
y := new(uint256.Int).SetBytes(common.Hex2Bytes(param.y))
stack.push(x)
stack.push(y)
- opFn(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
+ opFn(&pc, evm, &ScopeContext{nil, stack, nil})
actual := stack.pop()
result[i] = TwoOperandTestcase{param.x, param.y, fmt.Sprintf("%064x", actual)}
}
@@ -296,7 +296,7 @@ func opBenchmark(bench *testing.B, op executionFunc, args ...string) {
for _, arg := range intArgs {
stack.push(arg)
}
- op(&pc, evm.interpreter, scope)
+ op(&pc, evm, scope)
stack.pop()
}
bench.StopTimer()
@@ -528,13 +528,13 @@ func TestOpMstore(t *testing.T) {
v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
stack.push(new(uint256.Int).SetBytes(common.Hex2Bytes(v)))
stack.push(new(uint256.Int))
- opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
+ opMstore(&pc, evm, &ScopeContext{mem, stack, nil})
if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v {
t.Fatalf("Mstore fail, got %v, expected %v", got, v)
}
stack.push(new(uint256.Int).SetUint64(0x1))
stack.push(new(uint256.Int))
- opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
+ opMstore(&pc, evm, &ScopeContext{mem, stack, nil})
if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
t.Fatalf("Mstore failed to overwrite previous value")
}
@@ -555,7 +555,7 @@ func BenchmarkOpMstore(bench *testing.B) {
for i := 0; i < bench.N; i++ {
stack.push(value)
stack.push(memStart)
- opMstore(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
+ opMstore(&pc, evm, &ScopeContext{mem, stack, nil})
}
}
@@ -581,14 +581,14 @@ func TestOpTstore(t *testing.T) {
stack.push(new(uint256.Int).SetBytes(value))
// push the location to the stack
stack.push(new(uint256.Int))
- opTstore(&pc, evm.interpreter, &scopeContext)
+ opTstore(&pc, evm, &scopeContext)
// there should be no elements on the stack after TSTORE
if stack.len() != 0 {
t.Fatal("stack wrong size")
}
// push the location to the stack
stack.push(new(uint256.Int))
- opTload(&pc, evm.interpreter, &scopeContext)
+ opTload(&pc, evm, &scopeContext)
// there should be one element on the stack after TLOAD
if stack.len() != 1 {
t.Fatal("stack wrong size")
@@ -613,7 +613,7 @@ func BenchmarkOpKeccak256(bench *testing.B) {
for i := 0; i < bench.N; i++ {
stack.push(uint256.NewInt(32))
stack.push(start)
- opKeccak256(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
+ opKeccak256(&pc, evm, &ScopeContext{mem, stack, nil})
}
}
@@ -707,7 +707,7 @@ func TestRandom(t *testing.T) {
stack = newstack()
pc = uint64(0)
)
- opRandom(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
+ opRandom(&pc, evm, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
}
@@ -749,7 +749,7 @@ func TestBlobHash(t *testing.T) {
)
evm.SetTxContext(TxContext{BlobHashes: tt.hashes})
stack.push(uint256.NewInt(tt.idx))
- opBlobHash(&pc, evm.interpreter, &ScopeContext{nil, stack, nil})
+ opBlobHash(&pc, evm, &ScopeContext{nil, stack, nil})
if len(stack.data) != 1 {
t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
}
@@ -889,7 +889,7 @@ func TestOpMCopy(t *testing.T) {
mem.Resize(memorySize)
}
// Do the copy
- opMcopy(&pc, evm.interpreter, &ScopeContext{mem, stack, nil})
+ opMcopy(&pc, evm, &ScopeContext{mem, stack, nil})
want := common.FromHex(strings.ReplaceAll(tc.want, " ", ""))
if have := mem.store; !bytes.Equal(want, have) {
t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have)
@@ -1001,7 +1001,7 @@ func TestOpCLZ(t *testing.T) {
}
stack.push(val)
- opCLZ(&pc, evm.interpreter, &ScopeContext{Stack: stack})
+ opCLZ(&pc, evm, &ScopeContext{Stack: stack})
if gotLen := stack.len(); gotLen != 1 {
t.Fatalf("stack length = %d; want 1", gotLen)
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index cb52d27d7d..abb7ea93c5 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -22,8 +22,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/tracing"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
)
@@ -40,6 +38,7 @@ type Config struct {
ExtraEips []int // Additional EIPS that are to be enabled
StatelessSelfValidation bool // Generate execution witnesses and self-check against them (testing purpose)
+ EnableWitnessStats bool // Whether trie access statistics collection is enabled
PrecompileOverrides PrecompileOverrides // Precompiles can be swapped / changed / wrapped as needed
NoMaxCodeSize bool // Ignore Max code size and max init code size limits
@@ -98,93 +97,27 @@ func (ctx *ScopeContext) ContractCode() []byte {
return ctx.Contract.Code
}
-// EVMInterpreter represents an EVM interpreter
-type EVMInterpreter struct {
- evm *EVM
- table *JumpTable
-
- hasher crypto.KeccakState // Keccak256 hasher instanceproto shared across opcodes
- hasherBuf common.Hash // Keccak256 hasher result array shared across opcodes
-
- readOnly bool // Whether to throw on stateful modifications
- returnData []byte // Last CALL's return data for subsequent reuse
-}
-
-// NewEVMInterpreter returns a new instanceproto of the Interpreter.
-func NewEVMInterpreter(evm *EVM) *EVMInterpreter {
- // If jump table was not initialised we set the default one.
- var table *JumpTable
- switch {
- case evm.chainRules.IsOsaka:
- table = &osakaInstructionSet
- case evm.chainRules.IsVerkle:
- // TODO replace with proper instruction set when fork is specified
- table = &verkleInstructionSet
- case evm.chainRules.IsPrague:
- table = &pragueInstructionSet
- case evm.chainRules.IsCancun:
- table = &cancunInstructionSet
- case evm.chainRules.IsShanghai:
- table = &shanghaiInstructionSet
- case evm.chainRules.IsMerge:
- table = &mergeInstructionSet
- case evm.chainRules.IsLondon:
- table = &londonInstructionSet
- case evm.chainRules.IsBerlin:
- table = &berlinInstructionSet
- case evm.chainRules.IsIstanbul:
- table = &istanbulInstructionSet
- case evm.chainRules.IsConstantinople:
- table = &constantinopleInstructionSet
- case evm.chainRules.IsByzantium:
- table = &byzantiumInstructionSet
- case evm.chainRules.IsEIP158:
- table = &spuriousDragonInstructionSet
- case evm.chainRules.IsEIP150:
- table = &tangerineWhistleInstructionSet
- case evm.chainRules.IsHomestead:
- table = &homesteadInstructionSet
- default:
- table = &frontierInstructionSet
- }
- var extraEips []int
- if len(evm.Config.ExtraEips) > 0 {
- // Deep-copy jumptable to prevent modification of opcodes in other tables
- table = copyJumpTable(table)
- }
- for _, eip := range evm.Config.ExtraEips {
- if err := EnableEIP(eip, table); err != nil {
- // Disable it, so caller can check if it's activated or not
- log.Error("EIP activation failed", "eip", eip, "error", err)
- } else {
- extraEips = append(extraEips, eip)
- }
- }
- evm.Config.ExtraEips = extraEips
- return &EVMInterpreter{evm: evm, table: table, hasher: crypto.NewKeccakState()}
-}
-
// Run loops and evaluates the contract's code with the given input data and returns
// the return byte-slice and an error if one occurred.
//
// It's important to note that any errors returned by the interpreter should be
// considered a revert-and-consume-all-gas operation except for
// ErrExecutionReverted which means revert-and-keep-gas-left.
-func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (ret []byte, err error) {
+func (evm *EVM) Run(contract *Contract, input []byte, readOnly bool) (ret []byte, err error) {
// Increment the call depth which is restricted to 1024
- in.evm.depth++
- defer func() { in.evm.depth-- }()
+ evm.depth++
+ defer func() { evm.depth-- }()
// Make sure the readOnly is only set if we aren't in readOnly yet.
// This also makes sure that the readOnly flag isn't removed for child calls.
- if readOnly && !in.readOnly {
- in.readOnly = true
- defer func() { in.readOnly = false }()
+ if readOnly && !evm.readOnly {
+ evm.readOnly = true
+ defer func() { evm.readOnly = false }()
}
// Reset the previous call's return data. It's unimportant to preserve the old buffer
// as every returning call will return new data anyway.
- in.returnData = nil
+ evm.returnData = nil
// Don't bother with the execution if there's no code.
if len(contract.Code) == 0 {
@@ -193,7 +126,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
var (
op OpCode // current opcode
- jumpTable *JumpTable = in.table
+ jumpTable *JumpTable = evm.table
mem = NewMemory() // bound memory
stack = newstack() // local stack
callContext = &ScopeContext{
@@ -207,11 +140,12 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
pc = uint64(0) // program counter
cost uint64
// copies used by tracer
- pcCopy uint64 // needed for the deferred EVMLogger
- gasCopy uint64 // for EVMLogger to log gas remaining before execution
- logged bool // deferred EVMLogger should ignore already logged steps
- res []byte // result of the opcode execution function
- debug = in.evm.Config.Tracer != nil
+ pcCopy uint64 // needed for the deferred EVMLogger
+ gasCopy uint64 // for EVMLogger to log gas remaining before execution
+ logged bool // deferred EVMLogger should ignore already logged steps
+ res []byte // result of the opcode execution function
+ debug = evm.Config.Tracer != nil
+ isEIP4762 = evm.chainRules.IsEIP4762
)
// Don't move this deferred function, it's placed before the OnOpcode-deferred method,
// so that it gets executed _after_: the OnOpcode needs the stacks before
@@ -227,11 +161,11 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
if err == nil {
return
}
- if !logged && in.evm.Config.Tracer.OnOpcode != nil {
- in.evm.Config.Tracer.OnOpcode(pcCopy, byte(op), gasCopy, cost, callContext, in.returnData, in.evm.depth, VMErrorFromErr(err))
+ if !logged && evm.Config.Tracer.OnOpcode != nil {
+ evm.Config.Tracer.OnOpcode(pcCopy, byte(op), gasCopy, cost, callContext, evm.returnData, evm.depth, VMErrorFromErr(err))
}
- if logged && in.evm.Config.Tracer.OnFault != nil {
- in.evm.Config.Tracer.OnFault(pcCopy, byte(op), gasCopy, cost, callContext, in.evm.depth, VMErrorFromErr(err))
+ if logged && evm.Config.Tracer.OnFault != nil {
+ evm.Config.Tracer.OnFault(pcCopy, byte(op), gasCopy, cost, callContext, evm.depth, VMErrorFromErr(err))
}
}()
}
@@ -246,12 +180,12 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
logged, pcCopy, gasCopy = false, pc, contract.Gas
}
- if in.evm.chainRules.IsEIP4762 && !contract.IsDeployment && !contract.IsSystemCall {
+ if isEIP4762 && !contract.IsDeployment && !contract.IsSystemCall {
// if the PC ends up in a new "chunk" of verkleized code, charge the
// associated costs.
contractAddr := contract.Address()
- consumed, wanted := in.evm.TxContext.AccessEvents.CodeChunksRangeGas(contractAddr, pc, 1, uint64(len(contract.Code)), false, contract.Gas)
- contract.UseGas(consumed, in.evm.Config.Tracer, tracing.GasChangeWitnessCodeChunk)
+ consumed, wanted := evm.TxContext.AccessEvents.CodeChunksRangeGas(contractAddr, pc, 1, uint64(len(contract.Code)), false, contract.Gas)
+ contract.UseGas(consumed, evm.Config.Tracer, tracing.GasChangeWitnessCodeChunk)
if consumed < wanted {
return nil, ErrOutOfGas
}
@@ -296,7 +230,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// Consume the gas and return an error if not enough gas is available.
// cost is explicitly set so that the capture state defer method can get the proper cost
var dynamicCost uint64
- dynamicCost, err = operation.dynamicGas(in.evm, contract, stack, mem, memorySize)
+ dynamicCost, err = operation.dynamicGas(evm, contract, stack, mem, memorySize)
cost += dynamicCost // for tracing
if err != nil {
return nil, fmt.Errorf("%w: %v", ErrOutOfGas, err)
@@ -311,11 +245,11 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
// Do tracing before potential memory expansion
if debug {
- if in.evm.Config.Tracer.OnGasChange != nil {
- in.evm.Config.Tracer.OnGasChange(gasCopy, gasCopy-cost, tracing.GasChangeCallOpCode)
+ if evm.Config.Tracer.OnGasChange != nil {
+ evm.Config.Tracer.OnGasChange(gasCopy, gasCopy-cost, tracing.GasChangeCallOpCode)
}
- if in.evm.Config.Tracer.OnOpcode != nil {
- in.evm.Config.Tracer.OnOpcode(pc, byte(op), gasCopy, cost, callContext, in.returnData, in.evm.depth, VMErrorFromErr(err))
+ if evm.Config.Tracer.OnOpcode != nil {
+ evm.Config.Tracer.OnOpcode(pc, byte(op), gasCopy, cost, callContext, evm.returnData, evm.depth, VMErrorFromErr(err))
logged = true
}
}
@@ -324,7 +258,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
}
// execute the operation
- res, err = operation.execute(&pc, in, callContext)
+ res, err = operation.execute(&pc, evm, callContext)
if err != nil {
break
}
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 22eed8754f..d7a4d9da1d 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -23,7 +23,7 @@ import (
)
type (
- executionFunc func(pc *uint64, interpreter *EVMInterpreter, callContext *ScopeContext) ([]byte, error)
+ executionFunc func(pc *uint64, evm *EVM, callContext *ScopeContext) ([]byte, error)
gasFunc func(*EVM, *Contract, *Stack, *Memory, uint64) (uint64, error) // last parameter is the requested memory size as a uint64
// memorySizeFunc returns the required size, and whether the operation overflowed a uint64
memorySizeFunc func(*Stack) (size uint64, overflow bool)
diff --git a/core/vm/jumpdests.go b/core/vm/jumpdests.go
index 071b5f441d..1a30c1943f 100644
--- a/core/vm/jumpdests.go
+++ b/core/vm/jumpdests.go
@@ -29,7 +29,7 @@ type JumpDestCache interface {
}
// mapJumpDests is the default implementation of JumpDests using a map.
-// This implementation is not thread-safe and is meant to be used per EVM instanceproto.
+// This implementation is not thread-safe and is meant to be used per EVM instance.
type mapJumpDests map[common.Hash]BitVec
// newMapJumpDests creates a new map-based JumpDests implementation.
diff --git a/core/vm/program/program.go b/core/vm/program/program.go
index 5b9cfdcc5f..72cf6ff845 100644
--- a/core/vm/program/program.go
+++ b/core/vm/program/program.go
@@ -53,7 +53,7 @@ func (p *Program) add(op byte) *Program {
return p
}
-// pushBig creates a PUSHX instruction and pushes the given val.
+// doPush creates a PUSHX instruction and pushes the given val.
// - If the val is nil, it pushes zero
// - If the val is bigger than 32 bytes, it panics
func (p *Program) doPush(val *uint256.Int) {
diff --git a/crypto/bn256/gnark/g1.go b/crypto/bn256/gnark/g1.go
index 59e04cb247..3335a38310 100644
--- a/crypto/bn256/gnark/g1.go
+++ b/crypto/bn256/gnark/g1.go
@@ -5,6 +5,7 @@ import (
"math/big"
"github.com/consensys/gnark-crypto/ecc/bn254"
+ "github.com/ethereum/go-ethereum/common/bitutil"
)
// G1 is the affine representation of a G1 group element.
@@ -43,7 +44,7 @@ func (g *G1) Unmarshal(buf []byte) (int, error) {
return 0, errors.New("invalid G1 point size")
}
- if allZeroes(buf[:64]) {
+ if !bitutil.TestBytes(buf[:64]) {
// point at infinity
g.inner.X.SetZero()
g.inner.Y.SetZero()
diff --git a/crypto/bn256/gnark/g2.go b/crypto/bn256/gnark/g2.go
index 48a797e5a7..87ad88b9f1 100644
--- a/crypto/bn256/gnark/g2.go
+++ b/crypto/bn256/gnark/g2.go
@@ -4,6 +4,7 @@ import (
"errors"
"github.com/consensys/gnark-crypto/ecc/bn254"
+ "github.com/ethereum/go-ethereum/common/bitutil"
)
// G2 is the affine representation of a G2 group element.
@@ -31,7 +32,7 @@ func (g *G2) Unmarshal(buf []byte) (int, error) {
return 0, errors.New("invalid G2 point size")
}
- if allZeroes(buf[:128]) {
+ if !bitutil.TestBytes(buf[:128]) {
// point at infinity
g.inner.X.A0.SetZero()
g.inner.X.A1.SetZero()
diff --git a/crypto/kzg4844/kzg4844.go b/crypto/kzg4844/kzg4844.go
index ee98f0246b..f127ab471d 100644
--- a/crypto/kzg4844/kzg4844.go
+++ b/crypto/kzg4844/kzg4844.go
@@ -31,13 +31,13 @@ import (
var content embed.FS
var (
- blobT = reflect.TypeOf(Blob{})
- commitmentT = reflect.TypeOf(Commitment{})
- proofT = reflect.TypeOf(Proof{})
-
- CellProofsPerBlob = 128
+ blobT = reflect.TypeFor[Blob]()
+ commitmentT = reflect.TypeFor[Commitment]()
+ proofT = reflect.TypeFor[Proof]()
)
+const CellProofsPerBlob = 128
+
// Blob represents a 4844 data blob.
type Blob [131072]byte
diff --git a/crypto/kzg4844/kzg4844_ckzg_cgo.go b/crypto/kzg4844/kzg4844_ckzg_cgo.go
index b215b19928..46509674b6 100644
--- a/crypto/kzg4844/kzg4844_ckzg_cgo.go
+++ b/crypto/kzg4844/kzg4844_ckzg_cgo.go
@@ -150,7 +150,7 @@ func ckzgComputeCellProofs(blob *Blob) ([]Proof, error) {
return p, nil
}
-// ckzgVerifyCellProofs verifies that the blob data corresponds to the provided commitment.
+// ckzgVerifyCellProofBatch verifies that the blob data corresponds to the provided commitment.
func ckzgVerifyCellProofBatch(blobs []Blob, commitments []Commitment, cellProofs []Proof) error {
ckzgIniter.Do(ckzgInit)
var (
diff --git a/crypto/kzg4844/kzg4844_gokzg.go b/crypto/kzg4844/kzg4844_gokzg.go
index 82ec8379d4..e9676ff1b8 100644
--- a/crypto/kzg4844/kzg4844_gokzg.go
+++ b/crypto/kzg4844/kzg4844_gokzg.go
@@ -115,7 +115,7 @@ func gokzgComputeCellProofs(blob *Blob) ([]Proof, error) {
return p, nil
}
-// gokzgVerifyCellProofs verifies that the blob data corresponds to the provided commitment.
+// gokzgVerifyCellProofBatch verifies that the blob data corresponds to the provided commitment.
func gokzgVerifyCellProofBatch(blobs []Blob, commitments []Commitment, cellProofs []Proof) error {
gokzgIniter.Do(gokzgInit)
diff --git a/crypto/secp256k1/curve.go b/crypto/secp256k1/curve.go
index 85ba885d6f..b82b147e3c 100644
--- a/crypto/secp256k1/curve.go
+++ b/crypto/secp256k1/curve.go
@@ -35,29 +35,10 @@ package secp256k1
import (
"crypto/elliptic"
"math/big"
-)
-const (
- // number of bits in a big.Word
- wordBits = 32 << (uint64(^big.Word(0)) >> 63)
- // number of bytes in a big.Word
- wordBytes = wordBits / 8
+ "github.com/ethereum/go-ethereum/common/math"
)
-// readBits encodes the absolute value of bigint as big-endian bytes. Callers
-// must ensure that buf has enough space. If buf is too short the result will
-// be incomplete.
-func readBits(bigint *big.Int, buf []byte) {
- i := len(buf)
- for _, d := range bigint.Bits() {
- for j := 0; j < wordBytes && i > 0; j++ {
- i--
- buf[i] = byte(d)
- d >>= 8
- }
- }
-}
-
// This code is from https://github.com/ThePiachu/GoBit and implements
// several Koblitz elliptic curves over prime fields.
//
@@ -257,8 +238,8 @@ func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte {
byteLen := (bitCurve.BitSize + 7) >> 3
ret := make([]byte, 1+2*byteLen)
ret[0] = 4 // uncompressed point flag
- readBits(x, ret[1:1+byteLen])
- readBits(y, ret[1+byteLen:])
+ math.ReadBits(x, ret[1:1+byteLen])
+ math.ReadBits(y, ret[1+byteLen:])
return ret
}
diff --git a/crypto/secp256k1/scalar_mult_cgo.go b/crypto/secp256k1/scalar_mult_cgo.go
index d11c11faf8..b16c13f7e2 100644
--- a/crypto/secp256k1/scalar_mult_cgo.go
+++ b/crypto/secp256k1/scalar_mult_cgo.go
@@ -10,6 +10,8 @@ package secp256k1
import (
"math/big"
"unsafe"
+
+ "github.com/ethereum/go-ethereum/common/math"
)
/*
@@ -34,8 +36,8 @@ func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int,
// Do the multiplication in C, updating point.
point := make([]byte, 64)
- readBits(Bx, point[:32])
- readBits(By, point[32:])
+ math.ReadBits(Bx, point[:32])
+ math.ReadBits(By, point[32:])
pointPtr := (*C.uchar)(unsafe.Pointer(&point[0]))
scalarPtr := (*C.uchar)(unsafe.Pointer(&scalar[0]))
diff --git a/eth/api_backend.go b/eth/api_backend.go
index b8bcada5f6..9fecc12e03 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -384,11 +384,7 @@ func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (type
return b.eth.blockchain.GetReceiptsByHash(hash), nil
}
-func (b *EthAPIBackend) GetCanonicalReceipt(
- tx *types.Transaction,
- blockHash common.Hash,
- blockNumber, blockIndex uint64,
-) (*types.Receipt, error) {
+func (b *EthAPIBackend) GetCanonicalReceipt(tx *types.Transaction, blockHash common.Hash, blockNumber, blockIndex uint64) (*types.Receipt, error) {
return b.eth.blockchain.GetCanonicalReceipt(tx, blockHash, blockNumber, blockIndex)
}
@@ -512,9 +508,7 @@ func (b *EthAPIBackend) GetPoolTransaction(hash common.Hash) *types.Transaction
// not being finished. The caller must explicitly check the indexer progress.
//
// Notably, only the transaction in the canonical chain is visible.
-func (b *EthAPIBackend) GetCanonicalTransaction(
- txHash common.Hash,
-) (bool, *types.Transaction, common.Hash, uint64, uint64) {
+func (b *EthAPIBackend) GetCanonicalTransaction(txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64) {
lookup, tx := b.eth.blockchain.GetCanonicalTransaction(txHash)
if lookup == nil || tx == nil {
return false, nil, common.Hash{}, 0, 0
diff --git a/eth/api_debug.go b/eth/api_debug.go
index f128daa1bb..367cf7837e 100644
--- a/eth/api_debug.go
+++ b/eth/api_debug.go
@@ -474,7 +474,7 @@ func generateWitness(blockchain *core.BlockChain, block *types.Block) (*stateles
return nil, fmt.Errorf("failed to retrieve parent state: %w", err)
}
- statedb.StartPrefetcher("debug_execution_witness", witness)
+ statedb.StartPrefetcher("debug_execution_witness", witness, nil)
defer statedb.StopPrefetcher()
res, err := blockchain.Processor().Process(block, statedb, *blockchain.GetVMConfig())
diff --git a/eth/api_debug_test.go b/eth/api_debug_test.go
index efe4510c68..fe1fcef68c 100644
--- a/eth/api_debug_test.go
+++ b/eth/api_debug_test.go
@@ -308,7 +308,7 @@ func TestGetModifiedAccounts(t *testing.T) {
})
defer blockChain.Stop()
- // Create a debug API instanceproto.
+ // Create a debug API instance.
api := NewDebugAPI(&Ethereum{blockchain: blockChain})
// Test GetModifiedAccountsByNumber
diff --git a/eth/backend.go b/eth/backend.go
index 78262d5845..cfb2274635 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -355,7 +355,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
txPools = append(txPools, eth.blobTxPool)
}
- // if interop is enabled, establish an Interop Filter connected to this Ethereum instanceproto's
+ // if interop is enabled, establish an Interop Filter connected to this Ethereum instance's
// simulated logs and message safety check functions
poolFilters := []txpool.IngressFilter{}
if config.InteropMessageRPC != "" && config.InteropMempoolFiltering {
@@ -502,7 +502,7 @@ func makeExtraData(extra []byte) []byte {
func (s *Ethereum) APIs() []rpc.API {
apis := ethapi.GetAPIs(s.APIBackend)
- // Append any PeriodSequencer APIs as enabled
+ // Append any Sequencer APIs as enabled
if s.config.RollupSequencerTxConditionalEnabled {
log.Info("Enabling eth_sendRawTransactionConditional endpoint support")
costRateLimit := rate.Limit(s.config.RollupSequencerTxConditionalCostRateLimit)
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 9b13be9c29..2a9f3a68f9 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -19,20 +19,20 @@ package catalyst
import (
"bytes"
- "crypto/sha256"
"errors"
"fmt"
+ "reflect"
"strconv"
"sync"
"sync/atomic"
"time"
+ "unicode"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/version"
@@ -83,48 +83,16 @@ const (
beaconUpdateWarnFrequency = 5 * time.Minute
)
-// All methods provided over the engine endpoint.
-var caps = []string{
- "engine_forkchoiceUpdatedV1",
- "engine_forkchoiceUpdatedV2",
- "engine_forkchoiceUpdatedV3",
- "engine_forkchoiceUpdatedWithWitnessV1",
- "engine_forkchoiceUpdatedWithWitnessV2",
- "engine_forkchoiceUpdatedWithWitnessV3",
- "engine_exchangeTransitionConfigurationV1",
- "engine_getPayloadV1",
- "engine_getPayloadV2",
- "engine_getPayloadV3",
- "engine_getPayloadV4",
- "engine_getPayloadV5",
- "engine_getBlobsV1",
- "engine_getBlobsV2",
- "engine_newPayloadV1",
- "engine_newPayloadV2",
- "engine_newPayloadV3",
- "engine_newPayloadV4",
- "engine_newPayloadWithWitnessV1",
- "engine_newPayloadWithWitnessV2",
- "engine_newPayloadWithWitnessV3",
- "engine_newPayloadWithWitnessV4",
- "engine_executeStatelessPayloadV1",
- "engine_executeStatelessPayloadV2",
- "engine_executeStatelessPayloadV3",
- "engine_executeStatelessPayloadV4",
- "engine_getPayloadBodiesByHashV1",
- "engine_getPayloadBodiesByHashV2",
- "engine_getPayloadBodiesByRangeV1",
- "engine_getPayloadBodiesByRangeV2",
- "engine_getClientVersionV1",
-}
-
var (
// Number of blobs requested via getBlobsV2
getBlobsRequestedCounter = metrics.NewRegisteredCounter("engine/getblobs/requested", nil)
+
// Number of blobs requested via getBlobsV2 that are present in the blobpool
getBlobsAvailableCounter = metrics.NewRegisteredCounter("engine/getblobs/available", nil)
+
// Number of times getBlobsV2 responded with “hit”
getBlobsV2RequestHit = metrics.NewRegisteredCounter("engine/getblobs/hit", nil)
+
// Number of times getBlobsV2 responded with “miss”
getBlobsV2RequestMiss = metrics.NewRegisteredCounter("engine/getblobs/miss", nil)
)
@@ -522,29 +490,15 @@ func (api *ConsensusAPI) GetBlobsV1(hashes []common.Hash) ([]*engine.BlobAndProo
if len(hashes) > 128 {
return nil, engine.TooLargeRequest.With(fmt.Errorf("requested blob count too large: %v", len(hashes)))
}
- var (
- res = make([]*engine.BlobAndProofV1, len(hashes))
- hasher = sha256.New()
- index = make(map[common.Hash]int)
- sidecars = api.eth.BlobTxPool().GetBlobs(hashes)
- )
-
- for i, hash := range hashes {
- index[hash] = i
+ blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion0)
+ if err != nil {
+ return nil, engine.InvalidParams.With(err)
}
- for i, sidecar := range sidecars {
- if res[i] != nil || sidecar == nil {
- // already filled
- continue
- }
- for cIdx, commitment := range sidecar.Commitments {
- computed := kzg4844.CalcBlobHashV1(hasher, &commitment)
- if idx, ok := index[computed]; ok {
- res[idx] = &engine.BlobAndProofV1{
- Blob: sidecar.Blobs[cIdx][:],
- Proof: sidecar.Proofs[cIdx][:],
- }
- }
+ res := make([]*engine.BlobAndProofV1, len(hashes))
+ for i := 0; i < len(blobs); i++ {
+ res[i] = &engine.BlobAndProofV1{
+ Blob: blobs[i][:],
+ Proof: proofs[i][0][:],
}
}
return res, nil
@@ -566,47 +520,19 @@ func (api *ConsensusAPI) GetBlobsV2(hashes []common.Hash) ([]*engine.BlobAndProo
}
getBlobsV2RequestHit.Inc(1)
- // pull up the blob hashes
- var (
- res = make([]*engine.BlobAndProofV2, len(hashes))
- index = make(map[common.Hash][]int)
- sidecars = api.eth.BlobTxPool().GetBlobs(hashes)
- )
-
- for i, hash := range hashes {
- index[hash] = append(index[hash], i)
+ blobs, _, proofs, err := api.eth.BlobTxPool().GetBlobs(hashes, types.BlobSidecarVersion1)
+ if err != nil {
+ return nil, engine.InvalidParams.With(err)
}
- for i, sidecar := range sidecars {
- if res[i] != nil {
- // already filled
- continue
+ res := make([]*engine.BlobAndProofV2, len(hashes))
+ for i := 0; i < len(blobs); i++ {
+ var cellProofs []hexutil.Bytes
+ for _, proof := range proofs[i] {
+ cellProofs = append(cellProofs, proof[:])
}
- if sidecar == nil {
- // not found, return empty response
- return nil, nil
- }
- if sidecar.Version != types.BlobSidecarVersion1 {
- log.Info("GetBlobs queried V0 transaction: index %v, blobhashes %v", index, sidecar.BlobHashes())
- return nil, nil
- }
- blobHashes := sidecar.BlobHashes()
- for bIdx, hash := range blobHashes {
- if idxes, ok := index[hash]; ok {
- proofs, err := sidecar.CellProofsAt(bIdx)
- if err != nil {
- return nil, engine.InvalidParams.With(err)
- }
- var cellProofs []hexutil.Bytes
- for _, proof := range proofs {
- cellProofs = append(cellProofs, proof[:])
- }
- for _, idx := range idxes {
- res[idx] = &engine.BlobAndProofV2{
- Blob: sidecar.Blobs[bIdx][:],
- CellProofs: cellProofs,
- }
- }
- }
+ res[i] = &engine.BlobAndProofV2{
+ Blob: blobs[i][:],
+ CellProofs: cellProofs,
}
}
return res, nil
@@ -1014,6 +940,15 @@ func (api *ConsensusAPI) checkFork(timestamp uint64, forks ...forks.Fork) bool {
// ExchangeCapabilities returns the current methods provided by this node.
func (api *ConsensusAPI) ExchangeCapabilities([]string) []string {
+ valueT := reflect.TypeOf(api)
+ caps := make([]string, 0, valueT.NumMethod())
+ for i := 0; i < valueT.NumMethod(); i++ {
+ name := []rune(valueT.Method(i).Name)
+ if string(name) == "ExchangeCapabilities" {
+ continue
+ }
+ caps = append(caps, "engine_"+string(unicode.ToLower(name[0]))+string(name[1:]))
+ }
return caps
}
diff --git a/eth/catalyst/api_optimism_test.go b/eth/catalyst/api_optimism_test.go
index fc71cca821..20c3864926 100644
--- a/eth/catalyst/api_optimism_test.go
+++ b/eth/catalyst/api_optimism_test.go
@@ -162,7 +162,7 @@ func TestCheckOptimismPayload(t *testing.T) {
ExtraData: validExtraData,
},
cfg: postJovian(),
- expected: errors.New("jovian extraData should be 17 bytes, got 9"),
+ expected: errors.New("MinBaseFee extraData should be 17 bytes, got 9"),
},
}
diff --git a/eth/downloader/api.go b/eth/downloader/api.go
index 31cce1e0a4..a7c1a7e00f 100644
--- a/eth/downloader/api.go
+++ b/eth/downloader/api.go
@@ -200,7 +200,7 @@ func (s *SyncStatusSubscription) Unsubscribe() {
}
// SubscribeSyncStatus creates a subscription that will broadcast new synchronisation updates.
-// The given channel must receive interface values, the result can either.
+// The given channel must receive interface values, the result can either be a SyncingResult or false.
func (api *DownloaderAPI) SubscribeSyncStatus(status chan interface{}) *SyncStatusSubscription {
api.installSyncSubscription <- status
return &SyncStatusSubscription{api: api, c: status}
diff --git a/eth/downloader/beacondevsync.go b/eth/downloader/beacondevsync.go
index 7b30684133..03f17b1a52 100644
--- a/eth/downloader/beacondevsync.go
+++ b/eth/downloader/beacondevsync.go
@@ -52,7 +52,8 @@ func (d *Downloader) GetHeader(hash common.Hash) (*types.Header, error) {
for _, peer := range d.peers.peers {
if peer == nil {
- return nil, errors.New("could not find peer")
+ log.Warn("Encountered nil peer while retrieving sync target", "hash", hash)
+ continue
}
// Found a peer, attempt to retrieve the header whilst blocking and
// retry if it fails for whatever reason
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 2ed2d9fc43..16d64994d0 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -156,8 +156,8 @@ type Downloader struct {
// Progress reporting metrics
syncStartBlock uint64 // Head snap block when Geth was started
- syncStartTime time.Time // Time instanceproto when chain sync started
- syncLogTime time.Time // Time instanceproto when status was last reported
+ syncStartTime time.Time // Time instance when chain sync started
+ syncLogTime time.Time // Time instance when status was last reported
}
// BlockChain encapsulates functions required to sync a (full or snap) blockchain.
diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go
index a00ae45f7c..62d4c588b7 100644
--- a/eth/downloader/queue_test.go
+++ b/eth/downloader/queue_test.go
@@ -36,13 +36,13 @@ import (
)
// makeChain creates a chain of n blocks starting at and including parent.
-// the returned hash chain is ordered head->parent. In addition, every 3rd block
-// contains a transaction and every 5th an uncle to allow testing correct block
-// reassembly.
+// The returned hash chain is ordered head->parent.
+// If empty is false, every second block (i%2==0) contains one transaction.
+// No uncles are added.
func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) {
blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) {
block.SetCoinbase(common.Address{seed})
- // Add one tx to every secondblock
+ // Add one tx to every second block
if !empty && i%2 == 0 {
signer := types.MakeSigner(params.TestChainConfig, block.Number(), block.Timestamp())
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey)
diff --git a/eth/gasestimator/gasestimator.go b/eth/gasestimator/gasestimator.go
index d626bf900e..24b298dfc9 100644
--- a/eth/gasestimator/gasestimator.go
+++ b/eth/gasestimator/gasestimator.go
@@ -62,6 +62,23 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin
if call.GasLimit >= params.TxGas {
hi = call.GasLimit
}
+
+ // Cap the maximum gas allowance according to EIP-7825 if the estimation targets Osaka
+ if hi > params.MaxTxGas {
+ blockNumber, blockTime := opts.Header.Number, opts.Header.Time
+ if opts.BlockOverrides != nil {
+ if opts.BlockOverrides.Number != nil {
+ blockNumber = opts.BlockOverrides.Number.ToInt()
+ }
+ if opts.BlockOverrides.Time != nil {
+ blockTime = uint64(*opts.BlockOverrides.Time)
+ }
+ }
+ if opts.Config.IsOsaka(blockNumber, blockTime) {
+ hi = params.MaxTxGas
+ }
+ }
+
// Normalize the max fee per gas the call is willing to spend.
var feeCap *big.Int
if call.GasFeeCap != nil {
@@ -209,6 +226,9 @@ func execute(ctx context.Context, call *core.Message, opts *Options, gasLimit ui
if errors.Is(err, core.ErrIntrinsicGas) {
return true, nil, nil // Special case, raise gas limit
}
+ if errors.Is(err, core.ErrGasLimitTooHigh) {
+ return true, nil, nil // Special case, lower gas limit
+ }
return true, nil, err // Bail out
}
return result.Failed(), result, nil
diff --git a/eth/handler.go b/eth/handler.go
index 7b8116351c..8a146fc9aa 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -17,21 +17,22 @@
package eth
import (
+ "cmp"
+ crand "crypto/rand"
"errors"
"maps"
"math"
- "math/big"
"slices"
"sync"
"sync/atomic"
"time"
+ "github.com/dchest/siphash"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/fetcher"
@@ -122,9 +123,10 @@ type handler struct {
noTxGossip bool
- downloader *downloader.Downloader
- txFetcher *fetcher.TxFetcher
- peers *peerSet
+ downloader *downloader.Downloader
+ txFetcher *fetcher.TxFetcher
+ peers *peerSet
+ txBroadcastKey [16]byte
eventMux *event.TypeMux
txsCh chan core.NewTxsEvent
@@ -157,6 +159,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
noTxGossip: config.NoTxGossip,
chain: config.Chain,
peers: newPeerSet(),
+ txBroadcastKey: newBroadcastChoiceKey(),
requiredBlocks: config.RequiredBlocks,
quitSync: make(chan struct{}),
handlerDoneCh: make(chan struct{}),
@@ -357,6 +360,8 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
case <-timeout.C:
peer.Log().Warn("Required block challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name())
h.removePeer(peer.ID())
+ case <-dead:
+ // Peer handler terminated, abort all goroutines
}
}(number, hash, req)
}
@@ -483,58 +488,40 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) {
txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly
annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce
- )
- // Broadcast transactions to a batch of peers not knowing about it
- direct := big.NewInt(int64(math.Sqrt(float64(h.peers.len())))) // Approximate number of peers to broadcast to
- if direct.BitLen() == 0 {
- direct = big.NewInt(1)
- }
- total := new(big.Int).Exp(direct, big.NewInt(2), nil) // Stabilise total peer count a bit based on sqrt peers
- var (
- signer = types.LatestSigner(h.chain.Config()) // Don't care about chain status, we just need *a* sender
- hasher = crypto.NewKeccakState()
- hash = make([]byte, 32)
+ signer = types.LatestSigner(h.chain.Config())
+ choice = newBroadcastChoice(h.nodeID, h.txBroadcastKey)
+ peers = h.peers.all()
)
+
for _, tx := range txs {
- var maybeDirect bool
+ var directSet map[*ethPeer]struct{}
switch {
case tx.Type() == types.BlobTxType:
blobTxs++
case tx.Size() > txMaxBroadcastSize:
largeTxs++
default:
- maybeDirect = true
+ // Get transaction sender address. Here we can ignore any error
+ // since we're just interested in any value.
+ txSender, _ := types.Sender(signer, tx)
+ directSet = choice.choosePeers(peers, txSender)
}
- // Send the transaction (if it's small enough) directly to a subset of
- // the peers that have not received it yet, ensuring that the flow of
- // transactions is grouped by account to (try and) avoid nonce gaps.
- //
- // To do this, we hash the local enode IW with together with a peer's
- // enode ID together with the transaction sender and broadcast if
- // `sha(self, peer, sender) mod peers < sqrt(peers)`.
- for _, peer := range h.peers.peersWithoutTransaction(tx.Hash()) {
- var broadcast bool
- if maybeDirect {
- hasher.Reset()
- hasher.Write(h.nodeID.Bytes())
- hasher.Write(peer.Node().ID().Bytes())
-
- from, _ := types.Sender(signer, tx) // Ignore error, we only use the addr as a propagation target splitter
- hasher.Write(from.Bytes())
-
- hasher.Read(hash)
- if new(big.Int).Mod(new(big.Int).SetBytes(hash), total).Cmp(direct) < 0 {
- broadcast = true
- }
+
+ for _, peer := range peers {
+ if peer.KnownTransaction(tx.Hash()) {
+ continue
}
- if broadcast {
+ if _, ok := directSet[peer]; ok {
+ // Send direct.
txset[peer] = append(txset[peer], tx.Hash())
} else {
+ // Send announcement.
annos[peer] = append(annos[peer], tx.Hash())
}
}
}
+
for peer, hashes := range txset {
directCount += len(hashes)
peer.AsyncSendTransactions(hashes)
@@ -597,7 +584,7 @@ func newBlockRangeState(chain *core.BlockChain, typeMux *event.TypeMux) *blockRa
return st
}
-// blockRangeBroadcastLoop announces changes in locally-available block range to peers.
+// blockRangeLoop announces changes in locally-available block range to peers.
// The range to announce is the range that is available in the store, so it's not just
// about imported blocks.
func (h *handler) blockRangeLoop(st *blockRangeState) {
@@ -699,3 +686,62 @@ func (st *blockRangeState) stop() {
func (st *blockRangeState) currentRange() eth.BlockRangeUpdatePacket {
return *st.next.Load()
}
+
+// broadcastChoice implements a deterministic random choice of peers. This is designed
+// specifically for choosing which peer receives a direct broadcast of a transaction.
+//
+// The choice is made based on the involved p2p node IDs and the transaction sender,
+// ensuring that the flow of transactions is grouped by account to (try and) avoid nonce
+// gaps.
+type broadcastChoice struct {
+ self enode.ID
+ key [16]byte
+ buffer map[*ethPeer]struct{}
+ tmp []broadcastPeer
+}
+
+type broadcastPeer struct {
+ p *ethPeer
+ score uint64
+}
+
+func newBroadcastChoiceKey() (k [16]byte) {
+ crand.Read(k[:])
+ return k
+}
+
+func newBroadcastChoice(self enode.ID, key [16]byte) *broadcastChoice {
+ return &broadcastChoice{
+ self: self,
+ key: key,
+ buffer: make(map[*ethPeer]struct{}),
+ }
+}
+
+// choosePeers selects the peers that will receive a direct transaction broadcast message.
+// Note the return value will only stay valid until the next call to choosePeers.
+func (bc *broadcastChoice) choosePeers(peers []*ethPeer, txSender common.Address) map[*ethPeer]struct{} {
+ // Compute randomized scores.
+ bc.tmp = slices.Grow(bc.tmp[:0], len(peers))[:len(peers)]
+ hash := siphash.New(bc.key[:])
+ for i, peer := range peers {
+ hash.Reset()
+ hash.Write(bc.self[:])
+ hash.Write(peer.Peer.Peer.ID().Bytes())
+ hash.Write(txSender[:])
+ bc.tmp[i] = broadcastPeer{peer, hash.Sum64()}
+ }
+
+ // Sort by score.
+ slices.SortFunc(bc.tmp, func(a, b broadcastPeer) int {
+ return cmp.Compare(a.score, b.score)
+ })
+
+ // Take top n.
+ clear(bc.buffer)
+ n := int(math.Ceil(math.Sqrt(float64(len(bc.tmp)))))
+ for i := range n {
+ bc.buffer[bc.tmp[i].p] = struct{}{}
+ }
+ return bc.buffer
+}
diff --git a/eth/handler_test.go b/eth/handler_test.go
index d0da098430..b37e6227f4 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -17,9 +17,12 @@
package eth
import (
+ "maps"
"math/big"
+ "math/rand"
"sort"
"sync"
+ "testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
@@ -29,8 +32,11 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
+ "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/holiman/uint256"
@@ -212,3 +218,102 @@ func (b *testHandler) close() {
b.handler.Stop()
b.chain.Stop()
}
+
+func TestBroadcastChoice(t *testing.T) {
+ self := enode.HexID("1111111111111111111111111111111111111111111111111111111111111111")
+ choice49 := newBroadcastChoice(self, [16]byte{1})
+ choice50 := newBroadcastChoice(self, [16]byte{1})
+
+ // Create test peers and random tx sender addresses.
+ rand := rand.New(rand.NewSource(33))
+ txsenders := make([]common.Address, 400)
+ for i := range txsenders {
+ rand.Read(txsenders[i][:])
+ }
+ peers := createTestPeers(rand, 50)
+ defer closePeers(peers)
+
+ // Evaluate choice49 first.
+ expectedCount := 7 // sqrt(49)
+ var chosen49 = make([]map[*ethPeer]struct{}, len(txsenders))
+ for i, txSender := range txsenders {
+ set := choice49.choosePeers(peers[:49], txSender)
+ chosen49[i] = maps.Clone(set)
+
+ // Sanity check choices. Here we check that the function selects different peers
+ // for different transaction senders.
+ if len(set) != expectedCount {
+ t.Fatalf("choice49 produced wrong count %d, want %d", len(set), expectedCount)
+ }
+ if i > 0 && maps.Equal(set, chosen49[i-1]) {
+ t.Errorf("choice49 for tx %d is equal to tx %d", i, i-1)
+ }
+ }
+
+ // Evaluate choice50 for the same peers and transactions. It should always yield more
+ // peers than choice49, and the chosen set should be a superset of choice49's.
+ for i, txSender := range txsenders {
+ set := choice50.choosePeers(peers[:50], txSender)
+ if len(set) < len(chosen49[i]) {
+ t.Errorf("for tx %d, choice50 has less peers than choice49", i)
+ }
+ for p := range chosen49[i] {
+ if _, ok := set[p]; !ok {
+ t.Errorf("for tx %d, choice50 did not choose peer %v, but choice49 did", i, p.ID())
+ }
+ }
+ }
+}
+
+func BenchmarkBroadcastChoice(b *testing.B) {
+ b.Run("50", func(b *testing.B) {
+ benchmarkBroadcastChoice(b, 50)
+ })
+ b.Run("200", func(b *testing.B) {
+ benchmarkBroadcastChoice(b, 200)
+ })
+ b.Run("500", func(b *testing.B) {
+ benchmarkBroadcastChoice(b, 500)
+ })
+}
+
+// This measures the overhead of sending one transaction to N peers.
+func benchmarkBroadcastChoice(b *testing.B, npeers int) {
+ rand := rand.New(rand.NewSource(33))
+ peers := createTestPeers(rand, npeers)
+ defer closePeers(peers)
+
+ txsenders := make([]common.Address, b.N)
+ for i := range txsenders {
+ rand.Read(txsenders[i][:])
+ }
+
+ self := enode.HexID("1111111111111111111111111111111111111111111111111111111111111111")
+ choice := newBroadcastChoice(self, [16]byte{1})
+
+ b.ResetTimer()
+ for i := range b.N {
+ set := choice.choosePeers(peers, txsenders[i])
+ if len(set) == 0 {
+ b.Fatal("empty result")
+ }
+ }
+}
+
+func createTestPeers(rand *rand.Rand, n int) []*ethPeer {
+ peers := make([]*ethPeer, n)
+ for i := range peers {
+ var id enode.ID
+ rand.Read(id[:])
+ p2pPeer := p2p.NewPeer(id, "test", nil)
+ ep := eth.NewPeer(eth.ETH69, p2pPeer, nil, nil)
+ peers[i] = ðPeer{Peer: ep}
+ }
+ return peers
+}
+
+func closePeers(peers []*ethPeer) {
+ for _, p := range peers {
+ p.Close()
+ }
+}
diff --git a/eth/peerset.go b/eth/peerset.go
index 6b0aff226c..e6f623f90c 100644
--- a/eth/peerset.go
+++ b/eth/peerset.go
@@ -19,9 +19,10 @@ package eth
import (
"errors"
"fmt"
+ "maps"
+ "slices"
"sync"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/p2p"
@@ -191,19 +192,12 @@ func (ps *peerSet) peer(id string) *ethPeer {
return ps.peers[id]
}
-// peersWithoutTransaction retrieves a list of peers that do not have a given
-// transaction in their set of known hashes.
-func (ps *peerSet) peersWithoutTransaction(hash common.Hash) []*ethPeer {
+// all returns all current peers.
+func (ps *peerSet) all() []*ethPeer {
ps.lock.RLock()
defer ps.lock.RUnlock()
- list := make([]*ethPeer, 0, len(ps.peers))
- for _, p := range ps.peers {
- if !p.KnownTransaction(hash) {
- list = append(list, p)
- }
- }
- return list
+ return slices.Collect(maps.Values(ps.peers))
}
// len returns if the current number of `eth` peers in the set. Since the `snap`
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 6fcaed450f..538cd03077 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -502,10 +502,10 @@ type Syncer struct {
storageHealed uint64 // Number of storage slots downloaded during the healing stage
storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
- startTime time.Time // Time instanceproto when snapshot sync started
- healStartTime time.Time // Time instanceproto when the state healing started
+ startTime time.Time // Time instance when snapshot sync started
+ healStartTime time.Time // Time instance when the state healing started
syncTimeOnce sync.Once // Ensure that the state sync time is uploaded only once
- logTime time.Time // Time instanceproto when status was last reported
+ logTime time.Time // Time instance when status was last reported
pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
lock sync.RWMutex // Protects fields that can change outside of sync (peers, reqs, root)
diff --git a/eth/sync_test.go b/eth/sync_test.go
index cad3a4732e..dc295f2790 100644
--- a/eth/sync_test.go
+++ b/eth/sync_test.go
@@ -88,9 +88,17 @@ func testSnapSyncDisabling(t *testing.T, ethVer uint, snapVer uint) {
if err := empty.handler.downloader.BeaconSync(ethconfig.SnapSync, full.chain.CurrentBlock(), nil); err != nil {
t.Fatal("sync failed:", err)
}
- time.Sleep(time.Second * 5) // Downloader internally has to wait a timer (3s) to be expired before exiting
-
- if empty.handler.snapSync.Load() {
- t.Fatalf("snap sync not disabled after successful synchronisation")
+ // Downloader internally has to wait for a timer (3s) to expire before
+ // exiting. Poll afterwards to determine whether snap sync has been disabled.
+ time.Sleep(time.Second * 3)
+ for timeout := time.After(time.Second); ; {
+ select {
+ case <-timeout:
+ t.Fatalf("snap sync not disabled after successful synchronisation")
+ case <-time.After(100 * time.Millisecond):
+ if !empty.handler.snapSync.Load() {
+ return
+ }
+ }
}
}
diff --git a/eth/syncer/syncer.go b/eth/syncer/syncer.go
index 33b1d37732..6b33ec54ba 100644
--- a/eth/syncer/syncer.go
+++ b/eth/syncer/syncer.go
@@ -89,6 +89,7 @@ func (s *Syncer) run() {
target *types.Header
ticker = time.NewTicker(time.Second * 5)
)
+ defer ticker.Stop()
for {
select {
case req := <-s.request:
@@ -99,7 +100,7 @@ func (s *Syncer) run() {
)
for {
if retries >= 10 {
- req.errc <- fmt.Errorf("sync target is not avaibale, %x", req.hash)
+ req.errc <- fmt.Errorf("sync target is not available, %x", req.hash)
break
}
select {
@@ -186,7 +187,7 @@ type API struct {
s *Syncer
}
-// NewAPI creates a new debug API instanceproto.
+// NewAPI creates a new debug API instance.
func NewAPI(s *Syncer) *API {
return &API{s: s}
}
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index 44301d92bc..837a3b124f 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -614,7 +614,7 @@ func TestTraceTransaction(t *testing.T) {
b.AddTx(tx)
target = tx.Hash()
})
- defer backend.chain.Stop()
+ defer backend.teardown()
api := NewAPI(backend)
result, err := api.TraceTransaction(context.Background(), target, nil)
if err != nil {
@@ -720,7 +720,7 @@ func TestTraceBlock(t *testing.T) {
b.AddTx(tx)
txHash = tx.Hash()
})
- defer backend.chain.Stop()
+ defer backend.teardown()
api := NewAPI(backend)
testSuite := []struct {
@@ -871,7 +871,7 @@ func TestTracingWithOverrides(t *testing.T) {
signer, accounts[0].key)
b.AddTx(tx)
})
- defer backend.chain.Stop()
+ defer backend.teardown()
api := NewAPI(backend)
randomAccounts := newAccounts(3)
type res struct {
@@ -1295,6 +1295,7 @@ func TestTraceChain(t *testing.T) {
nonce += 1
}
})
+ defer backend.teardown()
backend.refHook = func() { ref.Add(1) }
backend.relHook = func() { rel.Add(1) }
api := NewAPI(backend)
@@ -1403,7 +1404,7 @@ func TestTraceBlockWithBasefee(t *testing.T) {
txHash = tx.Hash()
baseFee.Set(b.BaseFee())
})
- defer backend.chain.Stop()
+ defer backend.teardown()
api := NewAPI(backend)
testSuite := []struct {
@@ -1489,7 +1490,7 @@ func TestStandardTraceBlockToFile(t *testing.T) {
b.AddTx(tx)
txHashs = append(txHashs, tx.Hash())
})
- defer backend.chain.Stop()
+ defer backend.teardown()
testSuite := []struct {
blockNumber rpc.BlockNumber
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index e5766b5eb3..c1e034125d 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -321,7 +321,7 @@ func TestInternals(t *testing.T) {
byte(vm.LOG0),
},
tracer: mkTracer("prestateTracer", nil),
- want: fmt.Sprintf(`{"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0"},"%s":{"balance":"0x1c6bf52634000"}}`, originHex),
+ want: fmt.Sprintf(`{"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600164ffffffffff60016000f560ff6000a0","codeHash":"0x27be17a236425a9b513d736c4bb84eca4505a15564cae640e85558cf4d7ff7bb"},"%s":{"balance":"0x1c6bf52634000"}}`, originHex),
},
{
// CREATE2 which requires padding memory by prestate tracer
@@ -340,7 +340,7 @@ func TestInternals(t *testing.T) {
byte(vm.LOG0),
},
tracer: mkTracer("prestateTracer", nil),
- want: fmt.Sprintf(`{"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600160ff60016000f560ff6000a0"},"%s":{"balance":"0x1c6bf52634000"}}`, originHex),
+ want: fmt.Sprintf(`{"0x00000000000000000000000000000000deadbeef":{"balance":"0x0","code":"0x6001600052600160ff60016000f560ff6000a0","codeHash":"0x5544040a7fd107ba8164108904724a38fb9c664daae88a5cc53580841e648edf"},"%s":{"balance":"0x1c6bf52634000"}}`, originHex),
},
} {
t.Run(tc.name, func(t *testing.T) {
diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go
index de04ce457b..3b6937f67e 100644
--- a/eth/tracers/internal/tracetest/prestate_test.go
+++ b/eth/tracers/internal/tracetest/prestate_test.go
@@ -110,6 +110,9 @@ func testPrestateTracer(tracerName string, dirPath string, t *testing.T) {
if err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
+ if vmRet.Failed() {
+ t.Logf("(warn) transaction failed: %v", vmRet.Err)
+ }
tracer.OnTxEnd(&types.Receipt{GasUsed: vmRet.UsedGas}, nil)
// Retrieve the trace result and compare against the expected
res, err := tracer.GetResult()
diff --git a/eth/tracers/internal/tracetest/supply_test.go b/eth/tracers/internal/tracetest/supply_test.go
index 8aedc9d564..2b5a8212aa 100644
--- a/eth/tracers/internal/tracetest/supply_test.go
+++ b/eth/tracers/internal/tracetest/supply_test.go
@@ -77,7 +77,7 @@ func TestSupplyOmittedFields(t *testing.T) {
out, _, err := testSupplyTracer(t, gspec, func(b *core.BlockGen) {
b.SetPoS()
- })
+ }, 1)
if err != nil {
t.Fatalf("failed to test supply tracer: %v", err)
}
@@ -120,7 +120,7 @@ func TestSupplyGenesisAlloc(t *testing.T) {
ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
}
- out, _, err := testSupplyTracer(t, gspec, emptyBlockGenerationFunc)
+ out, _, err := testSupplyTracer(t, gspec, emptyBlockGenerationFunc, 1)
if err != nil {
t.Fatalf("failed to test supply tracer: %v", err)
}
@@ -148,7 +148,55 @@ func TestSupplyRewards(t *testing.T) {
ParentHash: common.HexToHash("0xadeda0a83e337b6c073e3f0e9a17531a04009b397a9588c093b628f21b8bc5a3"),
}
- out, _, err := testSupplyTracer(t, gspec, emptyBlockGenerationFunc)
+ out, _, err := testSupplyTracer(t, gspec, emptyBlockGenerationFunc, 1)
+ if err != nil {
+ t.Fatalf("failed to test supply tracer: %v", err)
+ }
+
+ actual := out[expected.Number]
+
+ compareAsJSON(t, expected, actual)
+}
+
+func TestSupplyRewardsWithUncle(t *testing.T) {
+ var (
+ config = *params.AllEthashProtocolChanges
+
+ gspec = &core.Genesis{
+ Config: &config,
+ }
+ )
+
+ // Base reward for the miner
+ baseReward := ethash.ConstantinopleBlockReward.ToBig()
+ // Miner reward for uncle inclusion is 1/32 of the base reward
+ uncleInclusionReward := new(big.Int).Rsh(baseReward, 5)
+ // Uncle miner reward for an uncle that is 1 block behind is 7/8 of the base reward
+ uncleReward := big.NewInt(7)
+ uncleReward.Mul(uncleReward, baseReward).Rsh(uncleReward, 3)
+
+ totalReward := baseReward.Add(baseReward, uncleInclusionReward).Add(baseReward, uncleReward)
+
+ expected := supplyInfo{
+ Issuance: &supplyInfoIssuance{
+ Reward: (*hexutil.Big)(totalReward),
+ },
+ Number: 3,
+ Hash: common.HexToHash("0x0737d31f8671c18d32b5143833cfa600e4264df62324c9de569668c6de9eed6d"),
+ ParentHash: common.HexToHash("0x45af6557df87719cb3c7e6f8a98b61508ea74a797733191aececb4c2ec802447"),
+ }
+
+ // Generate a new chain where block 3 includes an uncle
+ uncleGenerationFunc := func(b *core.BlockGen) {
+ if b.Number().Uint64() == 3 {
+ prevBlock := b.PrevBlock(1) // Block 2
+ uncle := types.CopyHeader(prevBlock.Header())
+ uncle.Extra = []byte("uncle!")
+ b.AddUncle(uncle)
+ }
+ }
+
+ out, _, err := testSupplyTracer(t, gspec, uncleGenerationFunc, 3)
if err != nil {
t.Fatalf("failed to test supply tracer: %v", err)
}
@@ -195,7 +243,7 @@ func TestSupplyEip1559Burn(t *testing.T) {
b.AddTx(tx)
}
- out, chain, err := testSupplyTracer(t, gspec, eip1559BlockGenerationFunc)
+ out, chain, err := testSupplyTracer(t, gspec, eip1559BlockGenerationFunc, 1)
if err != nil {
t.Fatalf("failed to test supply tracer: %v", err)
}
@@ -238,7 +286,7 @@ func TestSupplyWithdrawals(t *testing.T) {
})
}
- out, chain, err := testSupplyTracer(t, gspec, withdrawalsBlockGenerationFunc)
+ out, chain, err := testSupplyTracer(t, gspec, withdrawalsBlockGenerationFunc, 1)
if err != nil {
t.Fatalf("failed to test supply tracer: %v", err)
}
@@ -318,7 +366,7 @@ func TestSupplySelfdestruct(t *testing.T) {
}
// 1. Test pre Cancun
- preCancunOutput, preCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc)
+ preCancunOutput, preCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc, 1)
if err != nil {
t.Fatalf("Pre-cancun failed to test supply tracer: %v", err)
}
@@ -360,7 +408,7 @@ func TestSupplySelfdestruct(t *testing.T) {
gspec.Config.CancunTime = &cancunTime
gspec.Config.BlobScheduleConfig = params.DefaultBlobSchedule
- postCancunOutput, postCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc)
+ postCancunOutput, postCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc, 1)
if err != nil {
t.Fatalf("Post-cancun failed to test supply tracer: %v", err)
}
@@ -500,7 +548,7 @@ func TestSupplySelfdestructItselfAndRevert(t *testing.T) {
b.AddTx(tx)
}
- output, chain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc)
+ output, chain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc, 1)
if err != nil {
t.Fatalf("failed to test supply tracer: %v", err)
}
@@ -542,7 +590,7 @@ func TestSupplySelfdestructItselfAndRevert(t *testing.T) {
compareAsJSON(t, expected, actual)
}
-func testSupplyTracer(t *testing.T, genesis *core.Genesis, gen func(*core.BlockGen)) ([]supplyInfo, *core.BlockChain, error) {
+func testSupplyTracer(t *testing.T, genesis *core.Genesis, gen func(b *core.BlockGen), numBlocks int) ([]supplyInfo, *core.BlockChain, error) {
engine := beacon.New(ethash.NewFaker())
traceOutputPath := filepath.ToSlash(t.TempDir())
@@ -562,7 +610,7 @@ func testSupplyTracer(t *testing.T, genesis *core.Genesis, gen func(*core.BlockG
}
defer chain.Stop()
- _, blocks, _ := core.GenerateChainWithGenesis(genesis, engine, 1, func(i int, b *core.BlockGen) {
+ _, blocks, _ := core.GenerateChainWithGenesis(genesis, engine, numBlocks, func(i int, b *core.BlockGen) {
b.SetCoinbase(common.Address{1})
gen(b)
})
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/7702_delegate.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/7702_delegate.json
index 14874dcc07..a86c289c3f 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/7702_delegate.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/7702_delegate.json
@@ -67,26 +67,23 @@
},
"config": {
"chainId": 1,
- "homesteadBlock": 1150000,
- "daoForkBlock": 1920000,
- "daoForkSupport": true,
- "eip150Block": 2463000,
- "eip155Block": 2675000,
- "eip158Block": 2675000,
- "byzantiumBlock": 4370000,
- "constantinopleBlock": 7280000,
- "petersburgBlock": 7280000,
- "istanbulBlock": 9069000,
- "muirGlacierBlock": 9200000,
- "berlinBlock": 12244000,
- "londonBlock": 12965000,
- "arrowGlacierBlock": 13773000,
- "grayGlacierBlock": 15050000,
- "shanghaiTime": 1681338455,
- "cancunTime": 1710338135,
- "pragueTime": 1746612311,
- "terminalTotalDifficulty": 58750000000000000000000,
- "depositContractAddress": "0x00000000219ab540356cbb839cbe05303d7705fa",
+ "homesteadBlock": 0,
+ "eip150Block": 0,
+ "eip155Block": 0,
+ "eip158Block": 0,
+ "byzantiumBlock": 0,
+ "constantinopleBlock": 0,
+ "petersburgBlock": 0,
+ "istanbulBlock": 0,
+ "muirGlacierBlock": 0,
+ "berlinBlock": 0,
+ "londonBlock": 0,
+ "arrowGlacierBlock": 0,
+ "grayGlacierBlock": 0,
+ "shanghaiTime": 0,
+ "cancunTime": 0,
+ "pragueTime": 0,
+ "terminalTotalDifficulty": 0,
"ethash": {},
"blobSchedule": {
"cancun": {
@@ -113,23 +110,58 @@
"input": "0x04f8ec0182075f830f424084714d24d7830493e09417816e9a858b161c3e37016d139cf618056cacd480a000000000000000000000000000000000000000000000000316580c3ab7e66cc4c0f85ef85c0194b684710e6d5914ad6e64493de2a3c424cc43e970823dc101a02f15ba55009fcd3682cd0f9c9645dd94e616f9a969ba3f1a5a2d871f9fe0f2b4a053c332a83312d0b17dd4c16eeb15b1ff5223398b14e0a55c70762e8f3972b7a580a02aceec9737d2a211c79aff3dbd4bf44a5cdabbdd6bbe19ff346a89d94d61914aa062e92842bfe7d2f3ff785c594c70fafafcb180fb32a774de1b92c588be8cd87b",
"result": {
"0x17816e9a858b161c3e37016d139cf618056cacd4": {
- "balance": "0x0",
- "code": "0xef0100b684710e6d5914ad6e64493de2a3c424cc43e970",
- "nonce": 15809
+ "balance": "0x0",
+ "code": "0xef0100b684710e6d5914ad6e64493de2a3c424cc43e970",
+ "codeHash":"0xca4cab497827c53a640924e1f7ebb69c3280f8ce8cef2d1d2f9a3707def2a856",
+ "nonce": 15809
+ },
+ "0x236501327e701692a281934230af0b6be8df3353": {
+ "balance": "0x0",
+ "code": "0x6080604052600a600c565b005b60186014601a565b605e565b565b600060597f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b3660008037600080366000845af43d6000803e808015607c573d6000f35b3d6000fdfea26469706673582212200b737106e31d6abde738d261a4c4f12fcdfac5141ebc6ab5ffe4cf6e1630aaed64736f6c63430008140033",
+ "codeHash": "0x297bbcbd2b9ae035f750536c62603b5b7240c69b04fe22eec21cf6fcbb61179f",
+ "nonce": 1,
+ "storage": {
+ "0x078d9cc432fb3eab476f678ef9a73d8ca570f23897c68eb99b2721ebf46e5a9e": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x000000000000000000000000bdb50eff425fb2b1b67fea21b8420eeb6d99ccc0",
+ "0x5555c0547520ec9521cc3134a71677625cdeb6accbb330321dcaf2cbc22c1fe9": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "0x84fdd52031be5dc8bcfa0ffd090a0bf85ef922e1fa9d026be0cf5716edafb4db": "0x0000000000000000000000000000000000000000007b74591c97f086c1057bee",
+ "0x8c854b3845c254f768d5435bc89fa04fb52bd2f72a1cf4370b962cf104ecd5fc": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "0xc45aef11733ee3a84cf02368a8b99ca24b1e3bfc2f5f532a1a2439aa077d2843": "0x000000000000000000000000000000000000000000000738cda8f7729a2a8a1e",
+ "0xda699a88dd51ba5e1d66c40fd985a4ad1511875941c3dd2936300679d596ab7b": "0x0000000000000000000000000000000000000000000000000000000000000000"
+ }
},
"0x4838b106fce9647bdf1e7877bf73ce8b0bad5f97": {
- "balance": "0x8c2e6837fe7fb165",
- "nonce": 1874580
+ "balance": "0x8c2e6837fe7fb165",
+ "nonce": 1874580
},
"0xb684710e6d5914ad6e64493de2a3c424cc43e970": {
- "balance": "0x0",
- "code": "0x60806040525f4711156100b6575f3273ffffffffffffffffffffffffffffffffffffffff16476040516100319061048b565b5f6040518083038185875af1925050503d805f811461006b576040519150601f19603f3d011682016040523d82523d5f602084013e610070565b606091505b50509050806100b4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100ab906104f9565b60405180910390fd5b505b73ffffffffffffffffffffffffffffffffffffffff80166001336100da9190610563565b73ffffffffffffffffffffffffffffffffffffffff161115610131576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610128906105f4565b60405180910390fd5b73b9df4a9ba45917e71d664d51462d46926e4798e873ffffffffffffffffffffffffffffffffffffffff166001336101699190610563565b73ffffffffffffffffffffffffffffffffffffffff160361045c575f8036906101929190610631565b5f1c90505f73cda6461f1a30c618373f5790a83e1569fb685cba73ffffffffffffffffffffffffffffffffffffffff16631f3a71ba306040518263ffffffff1660e01b81526004016101e491906106af565b602060405180830381865afa1580156101ff573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061022391906106ff565b90508181106104595773cda6461f1a30c618373f5790a83e1569fb685cba73ffffffffffffffffffffffffffffffffffffffff1663a9059cbb5f836040518363ffffffff1660e01b815260040161027b929190610739565b6020604051808303815f875af1158015610297573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906102bb9190610795565b6102fa576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016102f1906104f9565b60405180910390fd5b5f73236501327e701692a281934230af0b6be8df335373ffffffffffffffffffffffffffffffffffffffff166370a08231306040518263ffffffff1660e01b815260040161034891906106af565b602060405180830381865afa158015610363573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061038791906106ff565b905073236501327e701692a281934230af0b6be8df335373ffffffffffffffffffffffffffffffffffffffff1663a9059cbb32836040518363ffffffff1660e01b81526004016103d8929190610739565b60206040518083038
15f875af11580156103f4573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104189190610795565b610457576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161044e9061080a565b60405180910390fd5b505b50505b005b5f81905092915050565b50565b5f6104765f8361045e565b915061048182610468565b5f82019050919050565b5f6104958261046b565b9150819050919050565b5f82825260208201905092915050565b7f5472616e73666572206661696c656400000000000000000000000000000000005f82015250565b5f6104e3600f8361049f565b91506104ee826104af565b602082019050919050565b5f6020820190508181035f830152610510816104d7565b9050919050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f61056d82610517565b915061057883610517565b9250828201905073ffffffffffffffffffffffffffffffffffffffff8111156105a4576105a3610536565b5b92915050565b7f50616e69632831372900000000000000000000000000000000000000000000005f82015250565b5f6105de60098361049f565b91506105e9826105aa565b602082019050919050565b5f6020820190508181035f83015261060b816105d2565b9050919050565b5f82905092915050565b5f819050919050565b5f82821b905092915050565b5f61063c8383610612565b82610647813561061c565b92506020821015610687576106827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff83602003600802610625565b831692505b505092915050565b5f61069982610517565b9050919050565b6106a98161068f565b82525050565b5f6020820190506106c25f8301846106a0565b92915050565b5f80fd5b5f819050919050565b6106de816106cc565b81146106e8575f80fd5b50565b5f815190506106f9816106d5565b92915050565b5f60208284031215610714576107136106c8565b5b5f610721848285016106eb565b91505092915050565b610733816106cc565b82525050565b5f60408201905061074c5f8301856106a0565b610759602083018461072a565b9392505050565b5f8115159050919050565b61077481610760565b811461077e575f80fd5b50565b5f8151905061078f8161076b565b92915050565b5f602082840312156107aa576107a96106c8565b5b5f6107b784828501610781565b91505092915050565b7f546f6b656e20747
2616e73666572206661696c656400000000000000000000005f82015250565b5f6107f460158361049f565b91506107ff826107c0565b602082019050919050565b5f6020820190508181035f830152610821816107e8565b905091905056fea2646970667358221220b6a06cc7b930dc4e34352a145f3548d57ec5a60d0097c1979ef363376bf9a69164736f6c63430008140033",
- "nonce": 1
+ "balance": "0x0",
+ "code": "0x60806040525f4711156100b6575f3273ffffffffffffffffffffffffffffffffffffffff16476040516100319061048b565b5f6040518083038185875af1925050503d805f811461006b576040519150601f19603f3d011682016040523d82523d5f602084013e610070565b606091505b50509050806100b4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100ab906104f9565b60405180910390fd5b505b73ffffffffffffffffffffffffffffffffffffffff80166001336100da9190610563565b73ffffffffffffffffffffffffffffffffffffffff161115610131576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610128906105f4565b60405180910390fd5b73b9df4a9ba45917e71d664d51462d46926e4798e873ffffffffffffffffffffffffffffffffffffffff166001336101699190610563565b73ffffffffffffffffffffffffffffffffffffffff160361045c575f8036906101929190610631565b5f1c90505f73cda6461f1a30c618373f5790a83e1569fb685cba73ffffffffffffffffffffffffffffffffffffffff16631f3a71ba306040518263ffffffff1660e01b81526004016101e491906106af565b602060405180830381865afa1580156101ff573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061022391906106ff565b90508181106104595773cda6461f1a30c618373f5790a83e1569fb685cba73ffffffffffffffffffffffffffffffffffffffff1663a9059cbb5f836040518363ffffffff1660e01b815260040161027b929190610739565b6020604051808303815f875af1158015610297573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906102bb9190610795565b6102fa576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016102f1906104f9565b60405180910390fd5b5f73236501327e701692a281934230af0b6be8df335373ffffffffffffffffffffffffffffffffffffffff166370a08231306040518263ffffffff1660e01b815260040161034891906106af565b602060405180830381865afa158015610363573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061038791906106ff565b905073236501327e701692a281934230af0b6be8df335373ffffffffffffffffffffffffffffffffffffffff1663a9059cbb32836040518363ffffffff1660e01b81526004016103d8929190610739565b60206040518083038
15f875af11580156103f4573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104189190610795565b610457576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161044e9061080a565b60405180910390fd5b505b50505b005b5f81905092915050565b50565b5f6104765f8361045e565b915061048182610468565b5f82019050919050565b5f6104958261046b565b9150819050919050565b5f82825260208201905092915050565b7f5472616e73666572206661696c656400000000000000000000000000000000005f82015250565b5f6104e3600f8361049f565b91506104ee826104af565b602082019050919050565b5f6020820190508181035f830152610510816104d7565b9050919050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f61056d82610517565b915061057883610517565b9250828201905073ffffffffffffffffffffffffffffffffffffffff8111156105a4576105a3610536565b5b92915050565b7f50616e69632831372900000000000000000000000000000000000000000000005f82015250565b5f6105de60098361049f565b91506105e9826105aa565b602082019050919050565b5f6020820190508181035f83015261060b816105d2565b9050919050565b5f82905092915050565b5f819050919050565b5f82821b905092915050565b5f61063c8383610612565b82610647813561061c565b92506020821015610687576106827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff83602003600802610625565b831692505b505092915050565b5f61069982610517565b9050919050565b6106a98161068f565b82525050565b5f6020820190506106c25f8301846106a0565b92915050565b5f80fd5b5f819050919050565b6106de816106cc565b81146106e8575f80fd5b50565b5f815190506106f9816106d5565b92915050565b5f60208284031215610714576107136106c8565b5b5f610721848285016106eb565b91505092915050565b610733816106cc565b82525050565b5f60408201905061074c5f8301856106a0565b610759602083018461072a565b9392505050565b5f8115159050919050565b61077481610760565b811461077e575f80fd5b50565b5f8151905061078f8161076b565b92915050565b5f602082840312156107aa576107a96106c8565b5b5f6107b784828501610781565b91505092915050565b7f546f6b656e20747
2616e73666572206661696c656400000000000000000000005f82015250565b5f6107f460158361049f565b91506107ff826107c0565b602082019050919050565b5f6020820190508181035f830152610821816107e8565b905091905056fea2646970667358221220b6a06cc7b930dc4e34352a145f3548d57ec5a60d0097c1979ef363376bf9a69164736f6c63430008140033",
+ "codeHash":"0x710e40f71ebfefb907b9970505d085952d073dedc9a67e7ce2db450194c9ad04",
+ "nonce": 1
},
"0xb9df4a9ba45917e71d664d51462d46926e4798e7": {
- "balance": "0x597af049b190a724",
- "code": "0xef0100000000009b1d0af20d8c6d0a44e162d11f9b8f00",
- "nonce": 1887
+ "balance": "0x597af049b190a724",
+ "code": "0xef0100000000009b1d0af20d8c6d0a44e162d11f9b8f00",
+ "codeHash":"0xbb1a21a37f4391e14c4817bca5df4ed60b84e372053b367731ccd8ab0fb6daf1",
+ "nonce": 1887
+ },
+ "0xbdb50eff425fb2b1b67fea21b8420eeb6d99ccc0": {
+ "balance": "0x0",
+ "code": "0x6080604052600436106101cd5760003560e01c80637ecebe00116100f7578063a9059cbb11610095578063d505accf11610064578063d505accf146105b2578063dd62ed3e146105d2578063f1127ed814610637578063f2fde38b1461068357600080fd5b8063a9059cbb14610509578063ad3cb1cc14610529578063b119490e14610572578063c3cda5201461059257600080fd5b80638e539e8c116100d15780638e539e8c1461048857806391ddadf4146104a857806395d89b41146104d45780639ab24eb0146104e957600080fd5b80637ecebe001461040357806384b0196e146104235780638da5cb5b1461044b57600080fd5b80634bf5d7e91161016f5780635c19a95c1161013e5780635c19a95c146103795780636fcfff451461039957806370a08231146103ce578063715018a6146103ee57600080fd5b80634bf5d7e9146102dc5780634f1ef286146102f157806352d1902d14610306578063587cde1e1461031b57600080fd5b806323b872dd116101ab57806323b872dd1461026b578063313ce5671461028b5780633644e515146102a75780633a46b1a8146102bc57600080fd5b806306fdde03146101d2578063095ea7b3146101fd57806318160ddd1461022d575b600080fd5b3480156101de57600080fd5b506101e76106a3565b6040516101f49190612a81565b60405180910390f35b34801561020957600080fd5b5061021d610218366004612ab0565b61075e565b60405190151581526020016101f4565b34801561023957600080fd5b507f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace02545b6040519081526020016101f4565b34801561027757600080fd5b5061021d610286366004612ada565b610778565b34801561029757600080fd5b50604051601281526020016101f4565b3480156102b357600080fd5b5061025d61079e565b3480156102c857600080fd5b5061025d6102d7366004612ab0565b6107ad565b3480156102e857600080fd5b506101e7610845565b6103046102ff366004612ba2565b6108d6565b005b34801561031257600080fd5b5061025d6108f5565b34801561032757600080fd5b50610361610336366004612c04565b6001600160a01b03908116600090815260008051602061312283398151915260205260409020541690565b6040516001600160a01b0390911681526020016101f4565b34801561038557600080fd5b50610304610394366004612c04565b610924565b3480156103a557600080fd5b506103b96103b4366004612c04565b61092f565b60405163ffffffff90911681526020016101f4565b3480156103da57600080fd5b5061025d6103e
9366004612c04565b61093a565b3480156103fa57600080fd5b5061030461097f565b34801561040f57600080fd5b5061025d61041e366004612c04565b610993565b34801561042f57600080fd5b5061043861099e565b6040516101f49796959493929190612c1f565b34801561045757600080fd5b507f9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300546001600160a01b0316610361565b34801561049457600080fd5b5061025d6104a3366004612cd1565b610a9a565b3480156104b457600080fd5b506104bd610b16565b60405165ffffffffffff90911681526020016101f4565b3480156104e057600080fd5b506101e7610b20565b3480156104f557600080fd5b5061025d610504366004612c04565b610b71565b34801561051557600080fd5b5061021d610524366004612ab0565b610bd1565b34801561053557600080fd5b506101e76040518060400160405280600581526020017f352e302e3000000000000000000000000000000000000000000000000000000081525081565b34801561057e57600080fd5b5061030461058d366004612d0a565b610bdf565b34801561059e57600080fd5b506103046105ad366004612d88565b610d4b565b3480156105be57600080fd5b506103046105cd366004612de0565b610e21565b3480156105de57600080fd5b5061025d6105ed366004612e4a565b6001600160a01b0391821660009081527f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace016020908152604080832093909416825291909152205490565b34801561064357600080fd5b50610657610652366004612e7d565b610fac565b60408051825165ffffffffffff1681526020928301516001600160d01b031692810192909252016101f4565b34801561068f57600080fd5b5061030461069e366004612c04565b610fca565b606060007f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace005b90508060030180546106da90612ebd565b80601f016020809104026020016040519081016040528092919081815260200182805461070690612ebd565b80156107535780601f1061072857610100808354040283529160200191610753565b820191906000526020600020905b81548152906001019060200180831161073657829003601f168201915b505050505091505090565b60003361076c818585611021565b60019150505b92915050565b600033610786858285611033565b6107918585856110e9565b60019150505b9392505050565b60006107a8611161565b905090565b6000600080516020613122833981519152816107c7610b16565b905
08065ffffffffffff16841061080757604051637669fc0f60e11b81526004810185905265ffffffffffff821660248201526044015b60405180910390fd5b6108336108138561116b565b6001600160a01b03871660009081526001850160205260409020906111a2565b6001600160d01b031695945050505050565b606061084f61125b565b65ffffffffffff1661085f610b16565b65ffffffffffff161461089e576040517f6ff0714000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5060408051808201909152601d81527f6d6f64653d626c6f636b6e756d6265722666726f6d3d64656661756c74000000602082015290565b6108de611266565b6108e78261131d565b6108f18282611325565b5050565b60006108ff61140d565b507f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc90565b336108f18183611456565b600061077282611513565b6000807f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace005b6001600160a01b0390931660009081526020939093525050604090205490565b610987611564565b61099160006115d8565b565b600061077282611656565b600060608082808083817fa16a46d94261c7517cc8ff89f61c0ce93598e3c849801011dee649a6a557d10080549091501580156109dd57506001810154155b610a43576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f4549503731323a20556e696e697469616c697a6564000000000000000000000060448201526064016107fe565b610a4b611661565b610a536116b2565b604080516000808252602082019092527f0f000000000000000000000000000000000000000000000000000000000000009c939b5091995046985030975095509350915050565b600060008051602061312283398151915281610ab4610b16565b90508065ffffffffffff168410610aef57604051637669fc0f60e11b81526004810185905265ffffffffffff821660248201526044016107fe565b610b05610afb8561116b565b60028401906111a2565b6001600160d01b0316949350505050565b60006107a861125b565b7f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace0480546060917f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace00916106da90612ebd565b6001600160a01b03811660009081527fe8b26c30fad74198956032a3533d903385d56dd795af560196f9c78d4af40d016020526040812060008051602061312
283398151915290610bc1906116dc565b6001600160d01b03169392505050565b60003361076c8185856110e9565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00805468010000000000000000810460ff16159067ffffffffffffffff16600081158015610c2a5750825b905060008267ffffffffffffffff166001148015610c475750303b155b905081158015610c55575080155b15610c8c576040517ff92ee8a900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b845467ffffffffffffffff191660011785558315610cc057845468ff00000000000000001916680100000000000000001785555b610cca8888611718565b610cd38861172a565b610cdb611771565b610ce433611779565b610cec611771565b610cf6338761178a565b8315610d4157845468ff000000000000000019168555604051600181527fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d29060200160405180910390a15b5050505050505050565b83421115610d88576040517f4683af0e000000000000000000000000000000000000000000000000000000008152600481018590526024016107fe565b604080517fe48329057bfd03d55e49b547132e39cffd9c1820ad7b9d4c5307691425d15adf60208201526001600160a01b038816918101919091526060810186905260808101859052600090610e0290610dfa9060a001604051602081830303815290604052805190602001206117c0565b858585611808565b9050610e0e8187611836565b610e188188611456565b50505050505050565b83421115610e5e576040517f62791302000000000000000000000000000000000000000000000000000000008152600481018590526024016107fe565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c9888888610eca8c6001600160a01b031660009081527f5ab42ced628888259c08ac98db1eb0cf702fc1501344311d8b100cd1bfe4bb006020526040902080546001810190915590565b6040805160208101969096526001600160a01b0394851690860152929091166060840152608083015260a082015260c0810186905260e0016040516020818303038152906040528051906020012090506000610f25826117c0565b90506000610f3582878787611808565b9050896001600160a01b0316816001600160a01b031614610f95576040517f4b800e460000000000000000000000000000000000000000000000000000000081526001600160a01b0380831660048301528b1660248201526044016107fe565b610fa08
a8a8a611021565b50505050505050505050565b604080518082019091526000808252602082015261079783836118c1565b610fd2611564565b6001600160a01b038116611015576040517f1e4fbdf7000000000000000000000000000000000000000000000000000000008152600060048201526024016107fe565b61101e816115d8565b50565b61102e838383600161192c565b505050565b6001600160a01b0383811660009081527f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace01602090815260408083209386168352929052205460001981146110e357818110156110d4576040517ffb8f41b20000000000000000000000000000000000000000000000000000000081526001600160a01b038416600482015260248101829052604481018390526064016107fe565b6110e38484848403600061192c565b50505050565b6001600160a01b03831661112c576040517f96c6fd1e000000000000000000000000000000000000000000000000000000008152600060048201526024016107fe565b6001600160a01b0382166111565760405163ec442f0560e01b8152600060048201526024016107fe565b61102e838383611a58565b60006107a8611a63565b600065ffffffffffff82111561119e576040516306dfcc6560e41b815260306004820152602481018390526044016107fe565b5090565b8154600090818160058111156112015760006111bd84611ad7565b6111c79085612f0d565b60008881526020902090915081015465ffffffffffff90811690871610156111f1578091506111ff565b6111fc816001612f20565b92505b505b600061120f87878585611bbf565b9050801561124d5761123487611226600184612f0d565b600091825260209091200190565b54660100000000000090046001600160d01b0316611250565b60005b979650505050505050565b60006107a84361116b565b306001600160a01b037f000000000000000000000000bdb50eff425fb2b1b67fea21b8420eeb6d99ccc01614806112ff57507f000000000000000000000000bdb50eff425fb2b1b67fea21b8420eeb6d99ccc06001600160a01b03166112f37f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc546001600160a01b031690565b6001600160a01b031614155b156109915760405163703e46dd60e11b815260040160405180910390fd5b61101e611564565b816001600160a01b03166352d1902d6040518163ffffffff1660e01b8152600401602060405180830381865afa92505050801561137f575060408051601f3d908101601f1916820190925261137c91810190612f33565b60015b611
3a757604051634c9c8ce360e01b81526001600160a01b03831660048201526024016107fe565b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc8114611403576040517faa1d49a4000000000000000000000000000000000000000000000000000000008152600481018290526024016107fe565b61102e8383611c21565b306001600160a01b037f000000000000000000000000bdb50eff425fb2b1b67fea21b8420eeb6d99ccc016146109915760405163703e46dd60e11b815260040160405180910390fd5b6000805160206131228339815191526000611496846001600160a01b03908116600090815260008051602061312283398151915260205260409020541690565b6001600160a01b03858116600081815260208690526040808220805473ffffffffffffffffffffffffffffffffffffffff1916898616908117909155905194955093928516927f3134e8a2e6d97e929a7e54011ea5485d7d196dd5f0ba4d4ef95803e8e3fc257f9190a46110e3818461150e87611c77565b611c82565b6001600160a01b03811660009081527fe8b26c30fad74198956032a3533d903385d56dd795af560196f9c78d4af40d0160205260408120546000805160206131228339815191529061079790611dfc565b336115967f9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300546001600160a01b031690565b6001600160a01b031614610991576040517f118cdaa70000000000000000000000000000000000000000000000000000000081523360048201526024016107fe565b7f9016d09d72d40fdae2fd8ceac6b6234c7706214fd39c1cd1e609a0528c199300805473ffffffffffffffffffffffffffffffffffffffff1981166001600160a01b03848116918217845560405192169182907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a3505050565b600061077282611e2d565b7fa16a46d94261c7517cc8ff89f61c0ce93598e3c849801011dee649a6a557d10280546060917fa16a46d94261c7517cc8ff89f61c0ce93598e3c849801011dee649a6a557d100916106da90612ebd565b606060007fa16a46d94261c7517cc8ff89f61c0ce93598e3c849801011dee649a6a557d1006106c9565b8054600090801561170f576116f683611226600184612f0d565b54660100000000000090046001600160d01b0316610797565b60009392505050565b611720611e56565b6108f18282611ebd565b611732611e56565b61101e816040518060400160405280600181526020017f310000000000000000000000000000000000000000000000000000000000000
0815250611f20565b610991611e56565b611781611e56565b61101e81611f93565b6001600160a01b0382166117b45760405163ec442f0560e01b8152600060048201526024016107fe565b6108f160008383611a58565b60006107726117cd611161565b836040517f19010000000000000000000000000000000000000000000000000000000000008152600281019290925260228201526042902090565b60008060008061181a88888888611f9b565b92509250925061182a828261206a565b50909695505050505050565b6001600160a01b03821660009081527f5ab42ced628888259c08ac98db1eb0cf702fc1501344311d8b100cd1bfe4bb006020526040902080546001810190915581811461102e576040517f752d88c00000000000000000000000000000000000000000000000000000000081526001600160a01b0384166004820152602481018290526044016107fe565b604080518082018252600080825260208083018290526001600160a01b03861682527fe8b26c30fad74198956032a3533d903385d56dd795af560196f9c78d4af40d0190529190912060008051602061312283398151915290611924908461216e565b949350505050565b7f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace006001600160a01b038516611990576040517fe602df05000000000000000000000000000000000000000000000000000000008152600060048201526024016107fe565b6001600160a01b0384166119d3576040517f94280d62000000000000000000000000000000000000000000000000000000008152600060048201526024016107fe565b6001600160a01b03808616600090815260018301602090815260408083209388168352929052208390558115611a5157836001600160a01b0316856001600160a01b03167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92585604051611a4891815260200190565b60405180910390a35b5050505050565b61102e8383836121e1565b60007f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f611a8e612280565b611a966122fc565b60408051602081019490945283019190915260608201524660808201523060a082015260c00160405160208183030381529060405280519060200120905090565b600081600003611ae957506000919050565b60006001611af684612352565b901c6001901b90506001818481611b0f57611b0f612f4c565b048201901c90506001818481611b2757611b27612f4c565b048201901c90506001818481611b3f57611b3f612f4c565b048201901c90506001818481611b5757611
b57612f4c565b048201901c90506001818481611b6f57611b6f612f4c565b048201901c90506001818481611b8757611b87612f4c565b048201901c90506001818481611b9f57611b9f612f4c565b048201901c905061079781828581611bb957611bb9612f4c565b046123e6565b60005b81831015611c19576000611bd684846123fc565b60008781526020902090915065ffffffffffff86169082015465ffffffffffff161115611c0557809250611c13565b611c10816001612f20565b93505b50611bc2565b509392505050565b611c2a82612417565b6040516001600160a01b038316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a2805115611c6f5761102e828261249b565b6108f1612511565b60006107728261093a565b6000805160206131228339815191526001600160a01b0384811690841614801590611cad5750600082115b156110e3576001600160a01b03841615611d57576001600160a01b038416600090815260018201602052604081208190611cf290612549611ced87612555565b612589565b6001600160d01b031691506001600160d01b03169150856001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611d4c929190918252602082015260400190565b60405180910390a250505b6001600160a01b038316156110e3576001600160a01b038316600090815260018201602052604081208190611d92906125c2611ced87612555565b6001600160d01b031691506001600160d01b03169150846001600160a01b03167fdec2bacdd2f05b59de34da9b523dff8be42e5e38e818c82fdb0bae774387a7248383604051611dec929190918252602082015260400190565b60405180910390a2505050505050565b600063ffffffff82111561119e576040516306dfcc6560e41b815260206004820152602481018390526044016107fe565b6000807f5ab42ced628888259c08ac98db1eb0cf702fc1501344311d8b100cd1bfe4bb0061095f565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a005468010000000000000000900460ff16610991576040517fd7e6bcf800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b611ec5611e56565b7f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace007f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace03611f118482612fb0565b50600481016110e38382612fb0565b611f28611e56565b7fa16a46d94261c7517cc8ff89f61c0ce
93598e3c849801011dee649a6a557d1007fa16a46d94261c7517cc8ff89f61c0ce93598e3c849801011dee649a6a557d102611f748482612fb0565b5060038101611f838382612fb0565b5060008082556001909101555050565b610fd2611e56565b600080807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0841115611fd65750600091506003905082612060565b604080516000808252602082018084528a905260ff891692820192909252606081018790526080810186905260019060a0016020604051602081039080840390855afa15801561202a573d6000803e3d6000fd5b5050604051601f1901519150506001600160a01b03811661205657506000925060019150829050612060565b9250600091508190505b9450945094915050565b600082600381111561207e5761207e613070565b03612087575050565b600182600381111561209b5761209b613070565b036120d2576040517ff645eedf00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60028260038111156120e6576120e6613070565b03612120576040517ffce698f7000000000000000000000000000000000000000000000000000000008152600481018290526024016107fe565b600382600381111561213457612134613070565b036108f1576040517fd78bce0c000000000000000000000000000000000000000000000000000000008152600481018290526024016107fe565b6040805180820190915260008082526020820152826000018263ffffffff168154811061219d5761219d613086565b60009182526020918290206040805180820190915291015465ffffffffffff81168252660100000000000090046001600160d01b0316918101919091529392505050565b6121ec8383836125ce565b6001600160a01b0383166122755760006122247f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace025490565b90506001600160d01b0380821115612272576040517f1cb15d2600000000000000000000000000000000000000000000000000000000815260048101839052602481018290526044016107fe565b50505b61102e838383612737565b60007fa16a46d94261c7517cc8ff89f61c0ce93598e3c849801011dee649a6a557d100816122ac611661565b8051909150156122c457805160209091012092915050565b815480156122d3579392505050565b7fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470935050505090565b60007fa16a46d94261c7517cc8ff89f61c0ce93598e3c849801011dee649a6a557d100816
123286116b2565b80519091501561234057805160209091012092915050565b600182015480156122d3579392505050565b600080608083901c1561236757608092831c92015b604083901c1561237957604092831c92015b602083901c1561238b57602092831c92015b601083901c1561239d57601092831c92015b600883901c156123af57600892831c92015b600483901c156123c157600492831c92015b600283901c156123d357600292831c92015b600183901c156107725760010192915050565b60008183106123f55781610797565b5090919050565b600061240b600284841861309c565b61079790848416612f20565b806001600160a01b03163b60000361244d57604051634c9c8ce360e01b81526001600160a01b03821660048201526024016107fe565b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc805473ffffffffffffffffffffffffffffffffffffffff19166001600160a01b0392909216919091179055565b6060600080846001600160a01b0316846040516124b891906130be565b600060405180830381855af49150503d80600081146124f3576040519150601f19603f3d011682016040523d82523d6000602084013e6124f8565b606091505b50915091506125088583836127cd565b95945050505050565b3415610991576040517fb398979f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061079782846130da565b60006001600160d01b0382111561119e576040516306dfcc6560e41b815260d06004820152602481018390526044016107fe565b6000806125b5612597610b16565b6125ad6125a3886116dc565b868863ffffffff16565b879190612842565b915091505b935093915050565b60006107978284613101565b7f52c63247e1f47db19d5ce0460030c497f067ca4cebf71ba98eeadabe20bace006001600160a01b03841661261c57818160020160008282546126119190612f20565b909155506126a79050565b6001600160a01b03841660009081526020829052604090205482811015612688576040517fe450d38c0000000000000000000000000000000000000000000000000000000081526001600160a01b038616600482015260248101829052604481018490526064016107fe565b6001600160a01b03851660009081526020839052604090209083900390555b6001600160a01b0383166126c55760028101805483900390556126e4565b6001600160a01b03831660009081526020829052604090208054830190555b826001600160a01b0316846001600160a01b03167fddf252ad1be2c89b69c2b068fc378da
a952ba7f163c4a11628f55a4df523b3ef8460405161272991815260200190565b60405180910390a350505050565b6000805160206131228339815191526001600160a01b03841661276a57612767816002016125c2611ced85612555565b50505b6001600160a01b03831661278e5761278b81600201612549611ced85612555565b50505b6001600160a01b03848116600090815260008051602061312283398151915260205260408082205486841683529120546110e392918216911684611c82565b6060826127e2576127dd82612850565b610797565b81511580156127f957506001600160a01b0384163b155b1561283b576040517f9996b3150000000000000000000000000000000000000000000000000000000081526001600160a01b03851660048201526024016107fe565b5080610797565b6000806125b5858585612892565b8051156128605780518082602001fd5b6040517f1425ea4200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8254600090819080156129d35760006128b087611226600185612f0d565b60408051808201909152905465ffffffffffff80821680845266010000000000009092046001600160d01b031660208401529192509087161015612920576040517f2520601d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b805165ffffffffffff80881691160361296f578461294388611226600186612f0d565b80546001600160d01b039290921666010000000000000265ffffffffffff9092169190911790556129c3565b6040805180820190915265ffffffffffff80881682526001600160d01b0380881660208085019182528b54600181018d5560008d815291909120945191519092166601000000000000029216919091179101555b6020015192508391506125ba9050565b50506040805180820190915265ffffffffffff80851682526001600160d01b0380851660208085019182528854600181018a5560008a81529182209551925190931666010000000000000291909316179201919091559050816125ba565b60005b83811015612a4c578181015183820152602001612a34565b50506000910152565b60008151808452612a6d816020860160208601612a31565b601f01601f19169290920160200192915050565b6020815260006107976020830184612a55565b80356001600160a01b0381168114612aab57600080fd5b919050565b60008060408385031215612ac357600080fd5b612acc83612a94565b946020939093013593505050565b600080600060608486031215612aef57600080fd5b612
af884612a94565b9250612b0660208501612a94565b9150604084013590509250925092565b634e487b7160e01b600052604160045260246000fd5b600067ffffffffffffffff80841115612b4757612b47612b16565b604051601f8501601f19908116603f01168101908282118183101715612b6f57612b6f612b16565b81604052809350858152868686011115612b8857600080fd5b858560208301376000602087830101525050509392505050565b60008060408385031215612bb557600080fd5b612bbe83612a94565b9150602083013567ffffffffffffffff811115612bda57600080fd5b8301601f81018513612beb57600080fd5b612bfa85823560208401612b2c565b9150509250929050565b600060208284031215612c1657600080fd5b61079782612a94565b7fff00000000000000000000000000000000000000000000000000000000000000881681526000602060e081840152612c5b60e084018a612a55565b8381036040850152612c6d818a612a55565b606085018990526001600160a01b038816608086015260a0850187905284810360c0860152855180825283870192509083019060005b81811015612cbf57835183529284019291840191600101612ca3565b50909c9b505050505050505050505050565b600060208284031215612ce357600080fd5b5035919050565b600082601f830112612cfb57600080fd5b61079783833560208501612b2c565b600080600060608486031215612d1f57600080fd5b833567ffffffffffffffff80821115612d3757600080fd5b612d4387838801612cea565b94506020860135915080821115612d5957600080fd5b50612d6686828701612cea565b925050604084013590509250925092565b803560ff81168114612aab57600080fd5b60008060008060008060c08789031215612da157600080fd5b612daa87612a94565b95506020870135945060408701359350612dc660608801612d77565b92506080870135915060a087013590509295509295509295565b600080600080600080600060e0888a031215612dfb57600080fd5b612e0488612a94565b9650612e1260208901612a94565b95506040880135945060608801359350612e2e60808901612d77565b925060a0880135915060c0880135905092959891949750929550565b60008060408385031215612e5d57600080fd5b612e6683612a94565b9150612e7460208401612a94565b90509250929050565b60008060408385031215612e9057600080fd5b612e9983612a94565b9150602083013563ffffffff81168114612eb257600080fd5b809150509250929050565b600181811c90821680612ed157607f821691505b602082108103612
ef157634e487b7160e01b600052602260045260246000fd5b50919050565b634e487b7160e01b600052601160045260246000fd5b8181038181111561077257610772612ef7565b8082018082111561077257610772612ef7565b600060208284031215612f4557600080fd5b5051919050565b634e487b7160e01b600052601260045260246000fd5b601f82111561102e57600081815260208120601f850160051c81016020861015612f895750805b601f850160051c820191505b81811015612fa857828155600101612f95565b505050505050565b815167ffffffffffffffff811115612fca57612fca612b16565b612fde81612fd88454612ebd565b84612f62565b602080601f8311600181146130135760008415612ffb5750858301515b600019600386901b1c1916600185901b178555612fa8565b600085815260208120601f198616915b8281101561304257888601518255948401946001909101908401613023565b50858210156130605787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b634e487b7160e01b600052602160045260246000fd5b634e487b7160e01b600052603260045260246000fd5b6000826130b957634e487b7160e01b600052601260045260246000fd5b500490565b600082516130d0818460208701612a31565b9190910192915050565b6001600160d01b038281168282160390808211156130fa576130fa612ef7565b5092915050565b6001600160d01b038181168382160190808211156130fa576130fa612ef756fee8b26c30fad74198956032a3533d903385d56dd795af560196f9c78d4af40d00a2646970667358221220c2a4c7c504a36ab9781f5fb312d81d27f781047ab9f97621c7f031a185ecb78864736f6c63430008140033",
+ "codeHash": "0x1c83a51aa39aa075951b4fa0aa146c33a33e035e0d7023b9de7f27a5a3d15058",
+ "nonce": 1
+ },
+ "0xcda6461f1a30c618373f5790a83e1569fb685cba": {
+ "balance": "0x0",
+ "code": "0x608060405234801561001057600080fd5b50600436106100ea5760003560e01c8063313ce5671161008c578063a9059cbb11610066578063a9059cbb146102ab578063dd62ed3e146102be578063e6fd48bc146102d4578063fc0c546a146102fb57600080fd5b8063313ce567146101f857806370a082311461023157806395d89b411461025157600080fd5b80631514617e116100c85780631514617e146101a857806318160ddd146101cf5780631f3a71ba146101d757806323b872dd146101ea57600080fd5b80630483a7f6146100ef57806306fdde0314610122578063095ea7b314610185575b600080fd5b61010f6100fd366004610926565b60006020819052908152604090205481565b6040519081526020015b60405180910390f35b604080517f466c75656e636520546f6b656e20284c6f636b65642900000000000000000000602082015281519082019091527f000000000000000000000000000000000000000000000000000000000000001681525b6040516101199190610965565b610198610193366004610998565b61033a565b6040519015158152602001610119565b61010f7f0000000000000000000000000000000000000000000000000000000001e1338081565b60025461010f565b61010f6101e5366004610926565b61038a565b6101986101933660046109c2565b61021f7f000000000000000000000000000000000000000000000000000000000000001281565b60405160ff9091168152602001610119565b61010f61023f366004610926565b60016020526000908152604090205481565b604080517f464c542d4c000000000000000000000000000000000000000000000000000000602082015281519082019091527f00000000000000000000000000000000000000000000000000000000000000058152610178565b6101986102b9366004610998565b610485565b61010f6102cc3660046109fe565b600092915050565b61010f7f0000000000000000000000000000000000000000000000000000000067afabe881565b6103227f000000000000000000000000236501327e701692a281934230af0b6be8df335381565b6040516001600160a01b039091168152602001610119565b60405162461bcd60e51b815260206004820152601560248201527f556e737570706f72746564206f7065726174696f6e000000000000000000000060448201526000906064015b60405180910390fd5b60007f0000000000000000000000000000000000000000000000000000000067afabe842116103bb57506000919050565b6001600160a01b0382166000908152602081815260408083205460019092528220547f00000
00000000000000000000000000000000000000000000000000001e13380929061040a9083610a47565b905060006104387f0000000000000000000000000000000000000000000000000000000067afabe842610a47565b905060008482106104545761044d8385610a47565b905061047b565b60006104608686610a5a565b90508361046d8285610a7c565b6104779190610a47565b9150505b9695505050505050565b60006001600160a01b038316156105045760405162461bcd60e51b815260206004820152602960248201527f5472616e7366657220616c6c6f776564206f6e6c7920746f20746865207a657260448201527f6f206164647265737300000000000000000000000000000000000000000000006064820152608401610381565b3361050f8184610568565b836001600160a01b0316816001600160a01b03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef8560405161055491815260200190565b60405180910390a360019150505b92915050565b60006105738361038a565b9050600081116105c55760405162461bcd60e51b815260206004820152601d60248201527f4e6f7420656e6f756768207468652072656c6561736520616d6f756e740000006044820152606401610381565b8115610620578082111561061b5760405162461bcd60e51b815260206004820152601d60248201527f4e6f7420656e6f756768207468652072656c6561736520616d6f756e740000006044820152606401610381565b610624565b8091505b6001600160a01b0383166000908152600160205260408120805484929061064c908490610a47565b9250508190555081600260008282546106659190610a47565b9091555061069f90506001600160a01b037f000000000000000000000000236501327e701692a281934230af0b6be8df33531684846106a4565b505050565b604080516001600160a01b03848116602483015260448083018590528351808403909101815260649092019092526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fa9059cbb0000000000000000000000000000000000000000000000000000000017905261069f9185919060009061073090841683610797565b905080516000141580156107555750808060200190518101906107539190610a93565b155b1561069f576040517f5274afe70000000000000000000000000000000000000000000000000000000081526001600160a01b0384166004820152602401610381565b60606107a5838360006107ac565b9392505050565b6060814710156107ea576040517fcd786059000000000000000000000
000000000000000000000000000000000008152306004820152602401610381565b600080856001600160a01b031684866040516108069190610ab5565b60006040518083038185875af1925050503d8060008114610843576040519150601f19603f3d011682016040523d82523d6000602084013e610848565b606091505b509150915061047b86838360608261086857610863826108c8565b6107a5565b815115801561087f57506001600160a01b0384163b155b156108c1576040517f9996b3150000000000000000000000000000000000000000000000000000000081526001600160a01b0385166004820152602401610381565b50806107a5565b8051156108d85780518082602001fd5b6040517f1425ea4200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80356001600160a01b038116811461092157600080fd5b919050565b60006020828403121561093857600080fd5b6107a58261090a565b60005b8381101561095c578181015183820152602001610944565b50506000910152565b6020815260008251806020840152610984816040850160208701610941565b601f01601f19169190910160400192915050565b600080604083850312156109ab57600080fd5b6109b48361090a565b946020939093013593505050565b6000806000606084860312156109d757600080fd5b6109e08461090a565b92506109ee6020850161090a565b9150604084013590509250925092565b60008060408385031215610a1157600080fd5b610a1a8361090a565b9150610a286020840161090a565b90509250929050565b634e487b7160e01b600052601160045260246000fd5b8181038181111561056257610562610a31565b600082610a7757634e487b7160e01b600052601260045260246000fd5b500490565b808202811582820484141761056257610562610a31565b600060208284031215610aa557600080fd5b815180151581146107a557600080fd5b60008251610ac7818460208701610941565b919091019291505056fea2646970667358221220aa9a251bde32306273cb5f6045040ac4b74b767bd02205c60c6003c5346ac34c64736f6c63430008140033",
+ "codeHash": "0xc10e7caa80b2af0d4faa10cd68f5a88dc5bbcf9d4f056677c3d259c8f31040e9",
+ "nonce": 1,
+ "storage": {
+ "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000007b74591c97f086c1057bee",
+ "0x4f2aab765280a617b8913308bffbaed810827576241edbcd290b48d2b699bf92": "0x0000000000000000000000000000000000000000000580926bcba6406ba40000",
+ "0xd057d56b4d1539d5c08615edc01a9792908fefc021b63dbdc5db20bf522e882e": "0x00000000000000000000000000000000000000000003920c271ee5a29be97bee"
+ }
}
}
}
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_code.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_code.json
index 5601ac797a..d5530ca61d 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_code.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_code.json
@@ -65,6 +65,7 @@
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
"balance": "0x4d87094125a369d9bd5",
"nonce": 1,
+ "codeHash": "0xec0ba40983fafc34be1bda1b3a3c6eabdd60fa4ce6eab345be1e51bda01d0d4f",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb",
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_code_and_storage.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_code_and_storage.json
index 310a6696b8..9c39eca77b 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_code_and_storage.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_code_and_storage.json
@@ -65,7 +65,8 @@
},
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
"balance": "0x4d87094125a369d9bd5",
- "nonce": 1
+ "nonce": 1,
+ "codeHash": "0xec0ba40983fafc34be1bda1b3a3c6eabdd60fa4ce6eab345be1e51bda01d0d4f"
},
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
"balance": "0x1780d77678137ac1b775",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_storage.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_storage.json
index c0cb05a2a2..a70151650e 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_storage.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/disable_storage.json
@@ -65,7 +65,8 @@
"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": {
"balance": "0x4d87094125a369d9bd5",
"nonce": 1,
- "code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b8
01561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029"
+ "code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b8
01561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
+ "codeHash": "0xec0ba40983fafc34be1bda1b3a3c6eabdd60fa4ce6eab345be1e51bda01d0d4f"
},
"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": {
"balance": "0x1780d77678137ac1b775",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/setcode_tx.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/setcode_tx.json
index 121509f132..03e825fe2f 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/setcode_tx.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/setcode_tx.json
@@ -83,7 +83,8 @@
},
"0x000000000000000000000000000000000000bbbb": {
"balance": "0x0",
- "code": "0x6042604255"
+ "code": "0x6042604255",
+ "codeHash": "0xfa2f0a459fb0004c3c79afe1ab7612a23f1e649b3b352242f8c7c45a0e3585b6"
},
"0x703c4b2bd70c169f5717101caee543299fc946c7": {
"balance": "0xde0b6b3a7640000",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/simple.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/simple.json
index bbfdae306e..bd57764e6b 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/simple.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/simple.json
@@ -64,6 +64,7 @@
"balance": "0x4d87094125a369d9bd5",
"nonce": 1,
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801
561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
+ "codeHash": "0xec0ba40983fafc34be1bda1b3a3c6eabdd60fa4ce6eab345be1e51bda01d0d4f",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb",
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000006f05b59d3b20000",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json
index 6eea6085b8..696f3fb04f 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json
@@ -71,6 +71,7 @@
},
"0x40f2f445da6c9047554683fb382fba6769717116": {
"code": "0x60606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b9056",
+ "codeHash": "0x19463d2ef23c9fcb3f853199279ecc9b21fa4147112bfe85664141ffbffd1a37",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f0c5cef39b17c213cfe090a46b8c7760ffb7928a",
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000000000000000001ee",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_disable_code.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_disable_code.json
index 5d7c024a5e..81382524ab 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_disable_code.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_disable_code.json
@@ -70,6 +70,7 @@
"balance": "0x9fb71abdd2621d8886"
},
"0x40f2f445da6c9047554683fb382fba6769717116": {
+ "codeHash": "0x19463d2ef23c9fcb3f853199279ecc9b21fa4147112bfe85664141ffbffd1a37",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f0c5cef39b17c213cfe090a46b8c7760ffb7928a",
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x00000000000000000000000000000000000000000000000000000000000001ee",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_disable_storage.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_disable_storage.json
index 65594feb44..e36670e50b 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_disable_storage.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_disable_storage.json
@@ -70,7 +70,8 @@
"balance": "0x9fb71abdd2621d8886"
},
"0x40f2f445da6c9047554683fb382fba6769717116": {
- "code": "0x60606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b9056"
+ "code": "0x60606040526000357c01000000000000000000000000000000000000000000000000000000009004806341c0e1b514610044578063cfae32171461005157610042565b005b61004f6004506100ca565b005b61005c60045061015e565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f1680156100bc5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561015b57600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16ff5b5b565b60206040519081016040528060008152602001506001600050805480601f016020809104026020016040519081016040528092919081815260200182805480156101cd57820191906000526020600020905b8154815290600101906020018083116101b057829003601f168201915b505050505090506101d9565b9056",
+ "codeHash": "0x19463d2ef23c9fcb3f853199279ecc9b21fa4147112bfe85664141ffbffd1a37"
},
"0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": {
"balance": "0x15b058920efcc5188",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_post_eip158.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_post_eip158.json
index 19e1f08bb7..26d0572517 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_post_eip158.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_post_eip158.json
@@ -57,8 +57,9 @@
"result": {
"post": {
"0x1bda2f8e4735507930bd6cfe873bf0bf0f4ab1de": {
+ "nonce": 1,
"code": "0x608060405234801561001057600080fd5b50600436106100365760003560e01c806309ce9ccb1461003b5780633fb5c1cb14610059575b600080fd5b610043610075565b60405161005091906100e2565b60405180910390f35b610073600480360381019061006e919061012e565b61007b565b005b60005481565b80600081905550600a8111156100c6576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016100bd906101de565b60405180910390fd5b50565b6000819050919050565b6100dc816100c9565b82525050565b60006020820190506100f760008301846100d3565b92915050565b600080fd5b61010b816100c9565b811461011657600080fd5b50565b60008135905061012881610102565b92915050565b600060208284031215610144576101436100fd565b5b600061015284828501610119565b91505092915050565b600082825260208201905092915050565b7f4e756d6265722069732067726561746572207468616e2031302c207472616e7360008201527f616374696f6e2072657665727465642e00000000000000000000000000000000602082015250565b60006101c860308361015b565b91506101d38261016c565b604082019050919050565b600060208201905081810360008301526101f7816101bb565b905091905056fea264697066735822122069018995fecf03bda91a88b6eafe41641709dee8b4a706fe301c8a569fe8c1b364736f6c63430008130033",
- "nonce": 1
+ "codeHash": "0xf6387add93966c115d42eb1ecd36a1fa28841703312943db753b88f890cc1666"
},
"0x2445e8c26a2bf3d1e59f1bb9b1d442caf90768e0": {
"balance": "0x10f0645688331eb5690"
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create.json
index e6d6f2435b..64a7188061 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create.json
@@ -212,6 +212,7 @@
"balance": "0x0",
"nonce": 237,
"code": "0x6060604052361561027c5760e060020a600035046301991313811461027e57806303d22885146102ca5780630450991814610323578063049ae734146103705780630ce46c43146103c35780630e85023914610602578063112e39a8146106755780631b4fa6ab146106c25780631e74a2d3146106d057806326a7985a146106fd5780633017fe2414610753578063346cabbc1461075c578063373a1bc3146107d55780633a9e74331461081e5780633c2c21a01461086e5780633d9ce89b146108ba578063480b70bd1461092f578063481078431461097e57806348f0518714610a0e5780634c471cde14610a865780634db3da8314610b09578063523ccfa814610b4f578063586a69fa14610be05780635a9f2def14610c3657806364ee49fe14610caf57806367beaccb14610d055780636840246014610d74578063795b9a6f14610dca5780637b55c8b514610e415780637c73f84614610ee15780638c0e156d14610f145780638c1d01c814610f605780638e46afa914610f69578063938c430714610fc0578063971c803f146111555780639772c982146111ac57806398c9cdf41461122857806398e00e541461127f5780639f927be7146112d5578063a00aede914611383578063a1c0539d146113d3578063aff21c6514611449578063b152f19e14611474578063b549793d146114cb578063b5b33eda1461154b578063bbc6eb1f1461159b578063c0f68859146115ab578063c3a2c0c314611601578063c43d05751461164b578063d8e5c04814611694578063dbfef71014611228578063e29fb547146116e7578063e6470fbe1461173a578063ea27a8811461174c578063ee77fe86146117d1578063f158458c14611851575b005b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503387876020604051908101604052806000815260200150612225610f6d565b61188260043560243560443560643560843560a43560c435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050338b8a6020604051908101604052806000815260200150896125196106c6565b611882600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503385600060e060020a026020604051908101604052806000815260200150611e4a610f6d565b611882600435602435604435606435608435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503389896020604051908101604052806000815260200150886124e86106c6565b604080516020604435600481810135601f810184900484028501840190955
2848452611882948135946024803595939460649492939101918190840183828082843750506040805160a08082019092529597963596608435969095506101449450925060a491506005908390839080828437509095505050505050604080518082018252600160a060020a03338116825288166020820152815160c0810190925260009173e54d323f9ef17c1f0dede47ecc86a9718fe5ea349163e3042c0f91600191908a908a9089908b90808b8b9090602002015181526020018b60016005811015610002579090602002015181526020018b60026005811015610002579090602002015181526020018b60036005811015610002579090602002015181526020018b6004600581101561000257909060200201518152602001348152602001506040518860e060020a02815260040180888152602001876002602002808383829060006004602084601f0104600f02600301f150905001868152602001806020018560ff1681526020018461ffff168152602001836006602002808383829060006004602084601f0104600f02600301f1509050018281038252868181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156105d25780820380516001836020036101000a031916815260200191505b509850505050505050505060206040518083038160008760325a03f2156100025750506040515191506124cd9050565b60408051602060248035600481810135601f81018590048502860185019096528585526118829581359591946044949293909201918190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808787611e64610f6d565b611882600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333600060e060020a026020604051908101604052806000815260200150611d28610f6d565b61189f5b6000611bf8611159565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a881600060005054611a9561159f565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346326a7985a6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b6118b760075b90565b604080516020606435600481810135601f8101849004840285018401909552848452611882948135946024803595604435956084949201919081908401838280828437509496505093359350505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160013
389898861224b610f6d565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503386866020604051908101604052806000815260200150611e64610f6d565b611882600435602435604435606435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333896020604051908101604052806000815260200150886123bc6106c6565b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503387866020604051908101604052806000815260200150611f8d610f6d565b60408051602060248035600481810135601f810185900485028601850190965285855261188295813595919460449492939092019181908401838280828437509496505093359350505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808888612225610f6d565b611882600435602435604435606435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503388886020604051908101604052806000815260200150612388610f6d565b611882600435604080517fc4144b2600000000000000000000000000000000000000000000000000000000815260016004820152600160a060020a03831660248201529051600091737c1eb207c07e7ab13cf245585bd03d0fa478d0349163c4144b26916044818101926020929091908290030181878760325a03f215610002575050604051519150611b409050565b604080516020604435600481810135601f81018490048402850184019095528484526118829481359460248035959394606494929391019181908401838280828437509496505093359350505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133888888612238610f6d565b604080516020604435600481810135601f810184900484028501840190955284845261188294813594602480359593946064949293910191819084018382808284375094965050933593505060843591505060a43560c435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338b8b8b896126536106c6565b611882600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333866020604051908101604052806000815260200150611e4a610f6d565b6118b76004355b604080517fed5bd7ea00000000000000000000000000000000000000000000000000000000815260016004820152600160a060020a03831660248201529051600091737c1eb207c07e7ab13cf245585bd03d0fa478d0349163ed5bd
7ea916044818101926020929091908290030181878760325a03f215610002575050604051519150611b409050565b61189f600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463586a69fa6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b60408051602060248035600481810135601f81018590048502860185019096528585526118829581359591946044949293909201918190840183828082843750949650509335935050606435915050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808989612388610f6d565b61188260043560243560443560643560843560a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050338a896020604051908101604052806000815260200150886124d76106c6565b6040805160206004803580820135601f8101849004840285018401909552848452611882949193602493909291840191908190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808587611e4a610f6d565b61188260043560243560443560643560843560a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050338a8a60206040519081016040528060008152602001508961262d6106c6565b604080516020606435600481810135601f810184900484028501840190955284845261188294813594602480359560443595608494920191908190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338888876120c7610f6d565b604080516020604435600481810135601f81018490048402850184019095528484526118829481359460248035959394606494929391019181908401838280828437505060408051608080820190925295979635969561010495509350608492508591508390839080828437509095505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338989898961263a6106c6565b6118b7600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a881858585611ba361122c565b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001600050333388602060405190810160405280600081526020015061236e610f6d565b6118b760005481565b6118c95b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea34638e46afa96040518160e060020a028152600401809
05060206040518083038160008760325a03f2156100025750506040515191506107599050565b60408051602060248035600481810135601f8101859004850286018501909652858552611882958135959194604494929390920191819084018382808284375094965050933593505060643591505060843560a43560c43560e43561010435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600160005033338e8e8d8f8e8e8e8e8e346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156111195780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519b9a5050505050505050505050565b61189f5b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463971c803f6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b604080516020604435600481810135601f8101849004840285018401909552848452611882948135946024803595939460649492939101918190840183828082843750949650509335935050608435915050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338989896123a2610f6d565b6118b75b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346398c9cdf46040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346398e00e546040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b611882600435604080517fe6ce3a6a000000000000000000000000000000000000000000000000000000008152600160048201527f3e3d0000000000000000000000000000000000000000000000000000000000006024820152604481018390529051600091737c1eb207c07e7ab13cf245585bd03d0fa478d0349163e6ce3a6a916064818101926020929091908290030181878760325a03f215610002575050604051519150611b4090505
65b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503385600060e060020a0260206040519081016040528060008152602001506121ef610f6d565b604080516020604435600481810135601f8101849004840285018401909552848452611882948135946024803595939460649492939101918190840183828082843750949650505050505050600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338787876120b5610f6d565b6118b7600435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a88183611b4561159f565b6118b75b600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463b152f19e6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b60408051602060248035600481810135601f8101859004850286018501909652858552611882958135959194604494929390920191819084018382808284375094965050933593505060643591505060843560a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600133808b8b8961262d6106c6565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503386600060e060020a026020604051908101604052806000815260200150612200610f6d565b6118b75b60005460649004610759565b6118b7600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463c0f688596040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506107599050565b611882600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333600060e060020a026020604051908101604052806000815260200150611bff610f6d565b611882600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503333876020604051908101604052806000815260200150612200610f6d565b611882600435602435604435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e41160016000503387600060e060020a026020604051908101604052806000815260200150612213610f6d565b611882600435602435604435606435608435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e411600160005033338a60206040519081016040528060008152602001508961250c6106c6565b61027c6000600060006118e033610b56565b6118b7600435602435604435606435600073e54d323f9ef17c1f0dede47
ecc86a9718fe5ea3463ea27a881868686866040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b949350505050565b604080516020604435600481810135601f810184900484028501840190955284845261188294813594602480359593946064949293910191819084018382808284375094965050933593505060843591505060a435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea346350d4e4116001338a8a8a886124fa6106c6565b6118b7600435602435600073e54d323f9ef17c1f0dede47ecc86a9718fe5ea3463ea27a88184846000611b4f61122c565b60408051600160a060020a03929092168252519081900360200190f35b6040805161ffff929092168252519081900360200190f35b60408051918252519081900360200190f35b6040805160ff929092168252519081900360200190f35b15611a905733925082600160a060020a031663c6502da86040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fc6803622000000000000000000000000000000000000000000000000000000008252915191945063c680362291600482810192602092919082900301816000876161da5a03f11561000257505060405151905080156119d1575082600160a060020a031663d379be236040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151600160a060020a03166000141590505b80156119dd5750600082115b80156119ec5750600054600190115b15611a90578183600160a060020a031663830953ab6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040515160640291909104915050604281118015611a4d5750600054829011155b15611a675760008054612710612711909102049055611a90565b602181108015611a7a5750600054829010155b15611a90576000805461271061270f9091020490555b505050565b6000611a9f61122c565b6040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f2156100025750506040515191506107599050565b6040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b919050565b6000611af261122c565b60405
18560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b92915050565b6040518560e060020a0281526004018085815260200184815260200183815260200182815260200194505050505060206040518083038160008760325a03f215610002575050604051519150505b9392505050565b9050610759565b611c076106c6565b6000611c11611478565b611c1961122c565b600054611c2461159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611cf25780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f2156100025750506040515191506107599050565b611d306106c6565b60008b611d3b61122c565b600054611d4661159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611e145780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150611b409050565b611e526106c6565b6000611e5c611478565b611d3b61122c565b611e6c6106c6565b6000611e76611478565b611e7e61122c565b600054611e8961159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156
11f575780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150611b9d9050565b611f956106c6565b8b611f9e611478565b611fa661122c565b600054611fb161159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f16801561207f5780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150611bf19050565b6120bd6106c6565b6000611f9e611478565b6120cf6106c6565b8b6120d8611478565b6120e061122c565b6000546120eb61159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156121b95780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f2156100025750506040515191506117c99050565b6121f76106c6565b8b611e76611478565b6122086106c6565b60008b611e7e61122c565b61221b6106c6565b8a8c611fa661122c565b61222d6106c6565b60008b611fa661122c565b6122406106c6565b60008b6120e061122c565b6122536106c6565b8c8b61225d61122c565b60005461226861159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156123365780820380516
001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f21561000257505060405151979650505050505050565b6123766106c6565b60008c8c600060005054611fb161159f565b6123906106c6565b60008c8c6000600050546120eb61159f565b6123aa6106c6565b60008c8c60006000505461226861159f565b60008d8d6000600050546120eb61159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f16801561249c5780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519150505b9695505050505050565b8e8d8d6000600050546123ce61159f565b60008d8d60006000505461226861159f565b60008d8d6000600050546123ce61159f565b60008e8e8d61226861159f565b8f8e8e8d61252561159f565b346040518e60e060020a028152600401808e81526020018d600160a060020a031681526020018c600160a060020a031681526020018b8152602001806020018a60ff1681526020018961ffff16815260200188815260200187815260200186815260200185815260200184815260200183815260200182810382528b8181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156125f35780820380516001836020036101000a031916815260200191505b509e50505050505050505050505050505060206040518083038160008760325a03f215610002575050604051519998505050505050505050565b60008e8e8d6123ce61159f565b8a5160208c015160408d015160608e015161226861159f565b60008e8e8d61252561159f56",
+ "codeHash": "0x461e17b7ae561793f22843985fc6866a3395c1fcee8ebf2d7ed5f293aec1b473",
"storage": {
"0x26cba0705aade77fa0f9275b68d01fb71206a44abd3a4f5a838f7241efbc8abf": "0x00000000000000000000000042e69cd0a17ae9992f9ad93d136c4bb0d95e3230",
"0x49f03a2c2f4fd666a32141fb324283b6f84a1d07b5fa435669fdb55766aef715": "0x000000000000000000000000d7b0e93fa8386b17fb5d1cf934076203dcc122f3",
@@ -226,11 +227,13 @@
},
"0x741467b251fca923d6229c4b439078b55dca233b": {
"balance": "0x29c613529e8218f8",
- "code": "0x606060405236156101a05760e060020a60003504630924120081146101c25780630a16697a146101cf5780630fd1f94e146101d8578063137c638b1461022e57806321835af61461023b57806324032866146102545780632f95b833146102d65780633017fe24146102e55780633233c686146102ef57806337f4c00e146102fa5780634500054f146103055780634e417a98146103785780634e71d92d146103e15780634f059a43146103f35780636146195414610451578063625cc4651461046157806367ce940d1461046a5780637d298ee314610477578063830953ab146104f9578063938b5f321461050457806395ee122114610516578063974654f41461052a578063a06db7dc14610535578063a9d2293d14610541578063ae45850b14610597578063b0f07e44146105a9578063c19d93fb146105cb578063c6502da81461062e578063c680362214610637578063ca94692d1461064a578063cc3471af14610673578063d379be23146106c9578063d62457f6146106e3578063ea8a1af0146106ee578063f5562753146107f3578063f6b4dfb414610854575b610868600080548190600160a060020a03908116339091161461087a57610994565b610868600b5460ff165b90565b610868600d5481565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630fd1f94e6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6108685b62012cc86101cc565b61086860043560008160001415610dc65750600161084f565b6108686004356024356000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630bd295e6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f215610002575050604051519150505b92915050565b61099860085461ffff166101cc565b61086860026101cc565b610868600a546101cc565b6108686006546101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a09431546003600050336040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6109af60408051602081810183526000825282516004805460026001821615610100026000190190911604601f81018490048402830184019095528482529293909291830182828015610a7d5780601f10610a5257610100808354040283529160200191610a7d565
b61086860006000600180610b7b6105cf565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753436040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1d6000600480610c986105cf565b61086860025481565b6108685b620186a06101cc565b6108686004356024355b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a1873db6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f2156100025750506040515191506102d09050565b6108686009546101cc565b610a1f600c54600160a060020a031681565b610868600b5462010000900460ff166101cc565b6108686007546101cc565b610a3c600e5460ff1681565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a9d2293d6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600054600160a060020a031681565b610868600080548190600160a060020a039081163390911614610a8957610994565b6108685b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc80635054d98a60036000506040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b61086860015481565b610868600b54610100900460ff166101cc565b61086860035474010000000000000000000000000000000000000000900460e060020a026101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063cc3471af6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600854620100009004600160a060020a03166101cc565b6108686005546101cc565b610a1d604080517fa09431540000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc809163a0943154916044808301926020929190829003018160008760325a03f215610002575050604051511590506107f157604080517f7e9265620000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45f
c8091637e9265629160448083019260009291908290030181838760325a03f215610002575050505b565b6108686004356000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753836040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f215610002575050604051519150505b919050565b610a1f600354600160a060020a03166101cc565b60408051918252519081900360200190f35b60045460006002600183161561010002600019019092169190910411156108a45760009150610994565b6108ac6105cf565b9050600081141580156108c0575060018114155b80156108cd575060028114155b156108db5760009150610994565b600480546000828152602060026001841615610100026000190190931692909204601f908101929092047f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b9081019236929083901061095d5782800160ff198235161785555b5061098d9291505b808211156109945760008155600101610949565b82800160010185558215610941579182015b8281111561094157823582600050559160200191906001019061096f565b5050600191505b5090565b6040805161ffff9092168252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015610a0f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b005b60408051600160a060020a03929092168252519081900360200190f35b6040805160ff9092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311610a6057829003601f168201915b505050505090506101cc565b6004546000600260018316156101000260001901909216919091041115610ab35760009150610994565b610abb6105cf565b905060008114158015610acf575060018114155b8015610adc575060028114155b15610aea5760009150610994565b604080517f7c0278fc000000000000000000000000000000000000000000000000000000008152600360048201818152602483019384523660448401819052731deeda36e15ec9e80f3d7414d67a4803ae45fc8094637c0278fc946000939190606401848480828437820191505094505050505060006040518083038160008760325a03f215610002575050505090565b1415610c8557604080516001547f0fee183d00000000000000000000000000000000000000000000000000000000825260036004830
15233600160a060020a0316602483015234604483015260648201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091630fee183d916084828101926020929190829003018160008760325a03f21561000257505060405151925050811515610c8a577389efe605e9ecbe22849cd85d5449cc946c26f8f36312c82bcc33346040518360e060020a0281526004018083600160a060020a031681526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515115159050610c8a57610002565b505090565b81925050610994565b505b50565b1415610c93575a9150610cab3383610481565b1515610cb75750610c95565b731deeda36e15ec9e80f3d7414d67a4803ae45fc8063da46be0a60038433610cdd61046e565b610ce5610232565b6040518660e060020a0281526004018086815260200185815260200184600160a060020a031681526020018381526020018281526020019550505050505060006040518083038160008760325a03f21561000257505050610c933360408051600080547fc17e6817000000000000000000000000000000000000000000000000000000008352600160a060020a03908116600484015230163160248301529151731deeda36e15ec9e80f3d7414d67a4803ae45fc809263c17e68179260448082019360209390928390039091019082908760325a03f2156100025750505050565b30600160a060020a031660405180807f5f5f6469672875696e7432353629000000000000000000000000000000000000815260200150600e019050604051809103902060e060020a8091040260e060020a9004600184036040518260e060020a0281526004018082815260200191505060006040518083038160008760325a03f292505050151561084f5761000256"
+ "code": "0x606060405236156101a05760e060020a60003504630924120081146101c25780630a16697a146101cf5780630fd1f94e146101d8578063137c638b1461022e57806321835af61461023b57806324032866146102545780632f95b833146102d65780633017fe24146102e55780633233c686146102ef57806337f4c00e146102fa5780634500054f146103055780634e417a98146103785780634e71d92d146103e15780634f059a43146103f35780636146195414610451578063625cc4651461046157806367ce940d1461046a5780637d298ee314610477578063830953ab146104f9578063938b5f321461050457806395ee122114610516578063974654f41461052a578063a06db7dc14610535578063a9d2293d14610541578063ae45850b14610597578063b0f07e44146105a9578063c19d93fb146105cb578063c6502da81461062e578063c680362214610637578063ca94692d1461064a578063cc3471af14610673578063d379be23146106c9578063d62457f6146106e3578063ea8a1af0146106ee578063f5562753146107f3578063f6b4dfb414610854575b610868600080548190600160a060020a03908116339091161461087a57610994565b610868600b5460ff165b90565b610868600d5481565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630fd1f94e6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6108685b62012cc86101cc565b61086860043560008160001415610dc65750600161084f565b6108686004356024356000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630bd295e6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f215610002575050604051519150505b92915050565b61099860085461ffff166101cc565b61086860026101cc565b610868600a546101cc565b6108686006546101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a09431546003600050336040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6109af60408051602081810183526000825282516004805460026001821615610100026000190190911604601f81018490048402830184019095528482529293909291830182828015610a7d5780601f10610a5257610100808354040283529160200191610a7d565
b61086860006000600180610b7b6105cf565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753436040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1d6000600480610c986105cf565b61086860025481565b6108685b620186a06101cc565b6108686004356024355b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a1873db6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f2156100025750506040515191506102d09050565b6108686009546101cc565b610a1f600c54600160a060020a031681565b610868600b5462010000900460ff166101cc565b6108686007546101cc565b610a3c600e5460ff1681565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a9d2293d6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600054600160a060020a031681565b610868600080548190600160a060020a039081163390911614610a8957610994565b6108685b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc80635054d98a60036000506040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b61086860015481565b610868600b54610100900460ff166101cc565b61086860035474010000000000000000000000000000000000000000900460e060020a026101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063cc3471af6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600854620100009004600160a060020a03166101cc565b6108686005546101cc565b610a1d604080517fa09431540000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc809163a0943154916044808301926020929190829003018160008760325a03f215610002575050604051511590506107f157604080517f7e9265620000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45f
c8091637e9265629160448083019260009291908290030181838760325a03f215610002575050505b565b6108686004356000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753836040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f215610002575050604051519150505b919050565b610a1f600354600160a060020a03166101cc565b60408051918252519081900360200190f35b60045460006002600183161561010002600019019092169190910411156108a45760009150610994565b6108ac6105cf565b9050600081141580156108c0575060018114155b80156108cd575060028114155b156108db5760009150610994565b600480546000828152602060026001841615610100026000190190931692909204601f908101929092047f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b9081019236929083901061095d5782800160ff198235161785555b5061098d9291505b808211156109945760008155600101610949565b82800160010185558215610941579182015b8281111561094157823582600050559160200191906001019061096f565b5050600191505b5090565b6040805161ffff9092168252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015610a0f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b005b60408051600160a060020a03929092168252519081900360200190f35b6040805160ff9092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311610a6057829003601f168201915b505050505090506101cc565b6004546000600260018316156101000260001901909216919091041115610ab35760009150610994565b610abb6105cf565b905060008114158015610acf575060018114155b8015610adc575060028114155b15610aea5760009150610994565b604080517f7c0278fc000000000000000000000000000000000000000000000000000000008152600360048201818152602483019384523660448401819052731deeda36e15ec9e80f3d7414d67a4803ae45fc8094637c0278fc946000939190606401848480828437820191505094505050505060006040518083038160008760325a03f215610002575050505090565b1415610c8557604080516001547f0fee183d00000000000000000000000000000000000000000000000000000000825260036004830
15233600160a060020a0316602483015234604483015260648201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091630fee183d916084828101926020929190829003018160008760325a03f21561000257505060405151925050811515610c8a577389efe605e9ecbe22849cd85d5449cc946c26f8f36312c82bcc33346040518360e060020a0281526004018083600160a060020a031681526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515115159050610c8a57610002565b505090565b81925050610994565b505b50565b1415610c93575a9150610cab3383610481565b1515610cb75750610c95565b731deeda36e15ec9e80f3d7414d67a4803ae45fc8063da46be0a60038433610cdd61046e565b610ce5610232565b6040518660e060020a0281526004018086815260200185815260200184600160a060020a031681526020018381526020018281526020019550505050505060006040518083038160008760325a03f21561000257505050610c933360408051600080547fc17e6817000000000000000000000000000000000000000000000000000000008352600160a060020a03908116600484015230163160248301529151731deeda36e15ec9e80f3d7414d67a4803ae45fc809263c17e68179260448082019360209390928390039091019082908760325a03f2156100025750505050565b30600160a060020a031660405180807f5f5f6469672875696e7432353629000000000000000000000000000000000000815260200150600e019050604051809103902060e060020a8091040260e060020a9004600184036040518260e060020a0281526004018082815260200191505060006040518083038160008760325a03f292505050151561084f5761000256",
+ "codeHash": "0x7678943ba1f399d76abe8e77b6f899c193f72aaefb5c4bd47fffb63c7f57ad9e"
},
"0x7dd677b54fc954824a7bc49bd26cbdfa12c75adf": {
"balance": "0xd7a58f5b73b4b6c4",
"code": "0x606060405236156100985760e060020a60003504633896002781146100e15780633defb962146100ea5780633f4be8891461010c5780634136aa351461011f5780634a420138146101a057806369c1a7121461028c5780638129fc1c146102955780638da5cb5b146102a6578063ae45850b146102b8578063af3309d8146102cc578063ea8a1af0146102d5578063ead50da3146102f4575b610308671bc16d674ec8000030600160a060020a03163110156100df57600554604051600160a060020a03918216916000913091909116319082818181858883f150505050505b565b61030a60005481565b610308671bc16d674ec8000030600160a060020a031631101561040f576100df565b61031c600454600160a060020a03165b90565b61030a5b600080548190118015610199575060408051600480547f0a16697a0000000000000000000000000000000000000000000000000000000083529251600160a060020a039390931692630a16697a928083019260209291829003018187876161da5a03f1156100025750506040515160ff01431090505b905061011c565b6103085b600354600554604080517f8c0e156d0000000000000000000000000000000000000000000000000000000081527f3defb96200000000000000000000000000000000000000000000000000000000600482015260a060020a90920461ffff1643016024830152621e8480604483015251600092600160a060020a031691638c0e156d916729a2241af62c000091606481810192602092909190829003018185886185025a03f1156100025750506040515192600160a060020a0384161491506102899050576004805473ffffffffffffffffffffffffffffffffffffffff1916821790555b50565b61030a60015481565b61030860008054146103f2576100df565b61031c600554600160a060020a031681565b61031c600354600160a060020a031661011c565b61030a60025481565b610308600554600160a060020a03908116339091161461035157610002565b61033960055460a060020a900461ffff1681565b005b60408051918252519081900360200190f35b60408051600160a060020a03929092168252519081900360200190f35b6040805161ffff929092168252519081900360200190f35b6004546000600160a060020a03919091163111156103c75760408051600480547fea8a1af00000000000000000000000000000000000000000000000000000000083529251600160a060020a03939093169263ea8a1af0928083019260009291829003018183876161da5a03f115610002575050505b600554604051600160a060020a039182169160009130919091163
19082818181858883f15050505050565b426000556100df6101a4565b600280546001908101909155429055565b600454600160a060020a03908116339091161461042b576100df565b610433610123565b151561043e576100df565b6103fe6101a456",
+ "codeHash": "0xd1255e5eabbe40c6e18c87b2ed2acf8157356103d1ca1df617f7b52811edefc4",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000056d0009b",
"0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000008b",
@@ -253,6 +256,7 @@
"0x651913977e8140c323997fce5e03c19e0015eebf": {
"balance": "0x29a2241af62c0000",
"code": "0x606060405236156101a05760e060020a60003504630924120081146101c25780630a16697a146101cf5780630fd1f94e146101d8578063137c638b1461022e57806321835af61461023b57806324032866146102545780632f95b833146102d65780633017fe24146102e55780633233c686146102ef57806337f4c00e146102fa5780634500054f146103055780634e417a98146103785780634e71d92d146103e15780634f059a43146103f35780636146195414610451578063625cc4651461046157806367ce940d1461046a5780637d298ee314610477578063830953ab146104f9578063938b5f321461050457806395ee122114610516578063974654f41461052a578063a06db7dc14610535578063a9d2293d14610541578063ae45850b14610597578063b0f07e44146105a9578063c19d93fb146105cb578063c6502da81461062e578063c680362214610637578063ca94692d1461064a578063cc3471af14610673578063d379be23146106c9578063d62457f6146106e3578063ea8a1af0146106ee578063f5562753146107f3578063f6b4dfb414610854575b610868600080548190600160a060020a03908116339091161461087a57610994565b610868600b5460ff165b90565b610868600d5481565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630fd1f94e6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6108685b62012cc86101cc565b61086860043560008160001415610dc65750600161084f565b6108686004356024356000731deeda36e15ec9e80f3d7414d67a4803ae45fc80630bd295e6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f215610002575050604051519150505b92915050565b61099860085461ffff166101cc565b61086860026101cc565b610868600a546101cc565b6108686006546101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a09431546003600050336040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b6109af60408051602081810183526000825282516004805460026001821615610100026000190190911604601f81018490048402830184019095528482529293909291830182828015610a7d5780601f10610a5257610100808354040283529160200191610a7d565b6
1086860006000600180610b7b6105cf565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753436040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1d6000600480610c986105cf565b61086860025481565b6108685b620186a06101cc565b6108686004356024355b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a1873db6600360005085856040518460e060020a0281526004018084815260200183600160a060020a03168152602001828152602001935050505060206040518083038160008760325a03f2156100025750506040515191506102d09050565b6108686009546101cc565b610a1f600c54600160a060020a031681565b610868600b5462010000900460ff166101cc565b6108686007546101cc565b610a3c600e5460ff1681565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063a9d2293d6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600054600160a060020a031681565b610868600080548190600160a060020a039081163390911614610a8957610994565b6108685b6000731deeda36e15ec9e80f3d7414d67a4803ae45fc80635054d98a60036000506040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b61086860015481565b610868600b54610100900460ff166101cc565b61086860035474010000000000000000000000000000000000000000900460e060020a026101cc565b6108686000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063cc3471af6040518160e060020a02815260040180905060206040518083038160008760325a03f2156100025750506040515191506101cc9050565b610a1f600854620100009004600160a060020a03166101cc565b6108686005546101cc565b610a1d604080517fa09431540000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc809163a0943154916044808301926020929190829003018160008760325a03f215610002575050604051511590506107f157604080517f7e9265620000000000000000000000000000000000000000000000000000000081526003600482015233600160a060020a031660248201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8
091637e9265629160448083019260009291908290030181838760325a03f215610002575050505b565b6108686004356000731deeda36e15ec9e80f3d7414d67a4803ae45fc8063f5562753836040518260e060020a0281526004018082815260200191505060206040518083038160008760325a03f215610002575050604051519150505b919050565b610a1f600354600160a060020a03166101cc565b60408051918252519081900360200190f35b60045460006002600183161561010002600019019092169190910411156108a45760009150610994565b6108ac6105cf565b9050600081141580156108c0575060018114155b80156108cd575060028114155b156108db5760009150610994565b600480546000828152602060026001841615610100026000190190931692909204601f908101929092047f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b9081019236929083901061095d5782800160ff198235161785555b5061098d9291505b808211156109945760008155600101610949565b82800160010185558215610941579182015b8281111561094157823582600050559160200191906001019061096f565b5050600191505b5090565b6040805161ffff9092168252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015610a0f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b005b60408051600160a060020a03929092168252519081900360200190f35b6040805160ff9092168252519081900360200190f35b820191906000526020600020905b815481529060010190602001808311610a6057829003601f168201915b505050505090506101cc565b6004546000600260018316156101000260001901909216919091041115610ab35760009150610994565b610abb6105cf565b905060008114158015610acf575060018114155b8015610adc575060028114155b15610aea5760009150610994565b604080517f7c0278fc000000000000000000000000000000000000000000000000000000008152600360048201818152602483019384523660448401819052731deeda36e15ec9e80f3d7414d67a4803ae45fc8094637c0278fc946000939190606401848480828437820191505094505050505060006040518083038160008760325a03f215610002575050505090565b1415610c8557604080516001547f0fee183d0000000000000000000000000000000000000000000000000000000082526003600483015
233600160a060020a0316602483015234604483015260648201529051731deeda36e15ec9e80f3d7414d67a4803ae45fc8091630fee183d916084828101926020929190829003018160008760325a03f21561000257505060405151925050811515610c8a577389efe605e9ecbe22849cd85d5449cc946c26f8f36312c82bcc33346040518360e060020a0281526004018083600160a060020a031681526020018281526020019250505060206040518083038160008760325a03f2156100025750506040515115159050610c8a57610002565b505090565b81925050610994565b505b50565b1415610c93575a9150610cab3383610481565b1515610cb75750610c95565b731deeda36e15ec9e80f3d7414d67a4803ae45fc8063da46be0a60038433610cdd61046e565b610ce5610232565b6040518660e060020a0281526004018086815260200185815260200184600160a060020a031681526020018381526020018281526020019550505050505060006040518083038160008760325a03f21561000257505050610c933360408051600080547fc17e6817000000000000000000000000000000000000000000000000000000008352600160a060020a03908116600484015230163160248301529151731deeda36e15ec9e80f3d7414d67a4803ae45fc809263c17e68179260448082019360209390928390039091019082908760325a03f2156100025750505050565b30600160a060020a031660405180807f5f5f6469672875696e7432353629000000000000000000000000000000000000815260200150600e019050604051809103902060e060020a8091040260e060020a9004600184036040518260e060020a0281526004018082815260200191505060006040518083038160008760325a03f292505050151561084f5761000256",
+ "codeHash": "0x7678943ba1f399d76abe8e77b6f899c193f72aaefb5c4bd47fffb63c7f57ad9e",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000007dd677b54fc954824a7bc49bd26cbdfa12c75adf",
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000011f8119429ed3a",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create_disable_code_and_storage.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create_disable_code_and_storage.json
index 96c93e7cf8..9b6b7577f9 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create_disable_code_and_storage.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/inner_create_disable_code_and_storage.json
@@ -211,13 +211,16 @@
},
"0x6c8f2a135f6ed072de4503bd7c4999a1a17f824b": {
"balance": "0x0",
- "nonce": 237
+ "nonce": 237,
+ "codeHash":"0x461e17b7ae561793f22843985fc6866a3395c1fcee8ebf2d7ed5f293aec1b473"
},
"0x741467b251fca923d6229c4b439078b55dca233b": {
- "balance": "0x29c613529e8218f8"
+ "balance": "0x29c613529e8218f8",
+ "codeHash":"0x7678943ba1f399d76abe8e77b6f899c193f72aaefb5c4bd47fffb63c7f57ad9e"
},
"0x7dd677b54fc954824a7bc49bd26cbdfa12c75adf": {
- "balance": "0xd7a58f5b73b4b6c4"
+ "balance": "0xd7a58f5b73b4b6c4",
+ "codeHash":"0xd1255e5eabbe40c6e18c87b2ed2acf8157356103d1ca1df617f7b52811edefc4"
},
"0xb834e3edfc1a927bdcecb67a9d0eccbd752a5bb3": {
"balance": "0xffe9b09a5c474dca",
@@ -233,7 +236,8 @@
"balance": "0x98e2b02f14529b1eb2"
},
"0x651913977e8140c323997fce5e03c19e0015eebf": {
- "balance": "0x29a2241af62c0000"
+ "balance": "0x29a2241af62c0000",
+ "codeHash":"0x7678943ba1f399d76abe8e77b6f899c193f72aaefb5c4bd47fffb63c7f57ad9e"
},
"0x6c8f2a135f6ed072de4503bd7c4999a1a17f824b": {
"nonce": 238
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple.json
index be4981b8b8..c97e16bce5 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple.json
@@ -68,6 +68,7 @@
"balance": "0x4d87094125a369d9bd5",
"nonce": 1,
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801
561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
+ "codeHash": "0xec0ba40983fafc34be1bda1b3a3c6eabdd60fa4ce6eab345be1e51bda01d0d4f",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834"
}
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple_disable_code_and_storage.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple_disable_code_and_storage.json
index 502149de43..9041901790 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple_disable_code_and_storage.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/simple_disable_code_and_storage.json
@@ -67,6 +67,7 @@
"balance": "0x4d87094125a369d9bd5",
"nonce": 1,
"code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801
561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029",
+ "codeHash": "0xec0ba40983fafc34be1bda1b3a3c6eabdd60fa4ce6eab345be1e51bda01d0d4f",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834"
}
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide.json
index 3f07146871..23ac6852d9 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/suicide.json
@@ -79,6 +79,7 @@
"0x2861bf89b6c640c79040d357c1e9513693ef5d3f": {
"balance": "0x0",
"code": "0x606060405236156100825760e060020a600035046312055e8f8114610084578063185061da146100b157806322beb9b9146100d5578063245a03ec146101865780633fa4f245146102a657806341c0e1b5146102af578063890eba68146102cb578063b29f0835146102de578063d6b4485914610308578063dd012a15146103b9575b005b6001805474ff0000000000000000000000000000000000000000191660a060020a60043502179055610082565b6100826001805475ff00000000000000000000000000000000000000000019169055565b61008260043560015460e060020a6352afbc3302606090815230600160a060020a039081166064527fb29f0835000000000000000000000000000000000000000000000000000000006084527fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060a45243840160c490815260ff60a060020a85041660e452600061010481905291909316926352afbc339261012492918183876161da5a03f1156100025750505050565b6100826004356024356001547fb0f07e440000000000000000000000000000000000000000000000000000000060609081526064839052600160a060020a039091169063b0f07e449060849060009060248183876161da5a03f150604080516001547f73657449742875696e74323536290000000000000000000000000000000000008252825191829003600e018220878352835192839003602001832060e060020a6352afbc33028452600160a060020a03308116600486015260e060020a9283900490920260248501526044840152438901606484015260a060020a820460ff1694830194909452600060a483018190529251931694506352afbc33935060c48181019391829003018183876161da5a03f115610002575050505050565b6103c460025481565b61008260005433600160a060020a039081169116146103ce575b565b6103c460015460a860020a900460ff1681565b6100826001805475ff000000000000000000000000000000000000000000191660a860020a179055565b61008260043560015460e060020a6352afbc3302606090815230600160a060020a039081166064527f185061da000000000000000000000000000000000000000000000000000000006084527fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060a45243840160c490815260ff60a060020a85041660e452600061010481905291909316926352afbc339261012492918183876161da5a03f1156100025750505050565b600435600255610082565b6060908152602090f35b6001547f6ff96d1700000000000000000000000
0000000000000000000000000000000006060908152600160a060020a0330811660645290911690632e1a7d4d908290636ff96d17906084906020906024816000876161da5a03f1156100025750506040805180517f2e1a7d4d0000000000000000000000000000000000000000000000000000000082526004820152905160248281019350600092829003018183876161da5a03f115610002575050600054600160a060020a03169050ff",
+ "codeHash": "0xad3e5642a709b936c0eafdd1fbca08a9f5f5089ff2008efeee3eed3f110d83d3",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000d3cda913deb6f67967b99d67acdfa1712c293601",
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000ff30c9e568f133adce1f1ea91e189613223fc461b9"
diff --git a/eth/tracers/internal/tracetest/util.go b/eth/tracers/internal/tracetest/util.go
index 8253fb416d..d7bf6d7dc3 100644
--- a/eth/tracers/internal/tracetest/util.go
+++ b/eth/tracers/internal/tracetest/util.go
@@ -72,7 +72,7 @@ func (c *traceContext) toBlockContext(genesis *core.Genesis, statedb types.State
context.Random = &genesis.Mixhash
}
- if genesis.ExcessBlobGas != nil && genesis.BlobGasUsed != nil {
+ if !genesis.Config.IsOptimism() && genesis.ExcessBlobGas != nil && genesis.BlobGasUsed != nil {
header := &types.Header{Number: genesis.Config.LondonBlock, Time: *genesis.Config.CancunTime}
excess := eip4844.CalcExcessBlobGas(genesis.Config, header, genesis.Timestamp)
header.ExcessBlobGas = &excess
diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go
index a12b990a93..7f376a27fc 100644
--- a/eth/tracers/js/tracer_test.go
+++ b/eth/tracers/js/tracer_test.go
@@ -65,7 +65,7 @@ func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainCo
tracer.OnTxStart(evm.GetVMContext(), types.NewTx(&types.LegacyTx{Gas: gasLimit, GasPrice: vmctx.txCtx.GasPrice}), contract.Caller())
tracer.OnEnter(0, byte(vm.CALL), contract.Caller(), contract.Address(), []byte{}, startGas, value.ToBig())
- ret, err := evm.Interpreter().Run(contract, []byte{}, false)
+ ret, err := evm.Run(contract, []byte{}, false)
tracer.OnExit(0, ret, startGas-contract.Gas, err, true)
// Rest gas assumes no refund
tracer.OnTxEnd(&types.Receipt{GasUsed: gasLimit - contract.Gas}, nil)
diff --git a/eth/tracers/live/supply.go b/eth/tracers/live/supply.go
index 47bd227259..f02cc2de57 100644
--- a/eth/tracers/live/supply.go
+++ b/eth/tracers/live/supply.go
@@ -199,8 +199,7 @@ func (s *supplyTracer) onBalanceChange(a common.Address, prevBalance, newBalance
// NOTE: don't handle "BalanceIncreaseGenesisBalance" because it is handled in OnGenesisBlock
switch reason {
- case tracing.BalanceIncreaseRewardMineUncle:
- case tracing.BalanceIncreaseRewardMineBlock:
+ case tracing.BalanceIncreaseRewardMineBlock, tracing.BalanceIncreaseRewardMineUncle:
s.delta.Issuance.Reward.Add(s.delta.Issuance.Reward, diff)
case tracing.BalanceIncreaseWithdrawal:
s.delta.Issuance.Withdrawals.Add(s.delta.Issuance.Withdrawals, diff)
diff --git a/eth/tracers/logger/logger_test.go b/eth/tracers/logger/logger_test.go
index 12000b3b9a..acc3069e70 100644
--- a/eth/tracers/logger/logger_test.go
+++ b/eth/tracers/logger/logger_test.go
@@ -52,7 +52,7 @@ func TestStoreCapture(t *testing.T) {
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, byte(vm.SSTORE)}
var index common.Hash
logger.OnTxStart(evm.GetVMContext(), nil, common.Address{})
- _, err := evm.Interpreter().Run(contract, []byte{}, false)
+ _, err := evm.Run(contract, []byte{}, false)
if err != nil {
t.Fatal(err)
}
diff --git a/eth/tracers/native/gen_account_json.go b/eth/tracers/native/gen_account_json.go
index 4c39cbc38c..5fec2648b7 100644
--- a/eth/tracers/native/gen_account_json.go
+++ b/eth/tracers/native/gen_account_json.go
@@ -15,14 +15,16 @@ var _ = (*accountMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (a account) MarshalJSON() ([]byte, error) {
type account struct {
- Balance *hexutil.Big `json:"balance,omitempty"`
- Code hexutil.Bytes `json:"code,omitempty"`
- Nonce uint64 `json:"nonce,omitempty"`
- Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
+ Balance *hexutil.Big `json:"balance,omitempty"`
+ Code hexutil.Bytes `json:"code,omitempty"`
+ CodeHash *common.Hash `json:"codeHash,omitempty"`
+ Nonce uint64 `json:"nonce,omitempty"`
+ Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
}
var enc account
enc.Balance = (*hexutil.Big)(a.Balance)
enc.Code = a.Code
+ enc.CodeHash = a.CodeHash
enc.Nonce = a.Nonce
enc.Storage = a.Storage
return json.Marshal(&enc)
@@ -31,10 +33,11 @@ func (a account) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (a *account) UnmarshalJSON(input []byte) error {
type account struct {
- Balance *hexutil.Big `json:"balance,omitempty"`
- Code *hexutil.Bytes `json:"code,omitempty"`
- Nonce *uint64 `json:"nonce,omitempty"`
- Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
+ Balance *hexutil.Big `json:"balance,omitempty"`
+ Code *hexutil.Bytes `json:"code,omitempty"`
+ CodeHash *common.Hash `json:"codeHash,omitempty"`
+ Nonce *uint64 `json:"nonce,omitempty"`
+ Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
}
var dec account
if err := json.Unmarshal(input, &dec); err != nil {
@@ -46,6 +49,9 @@ func (a *account) UnmarshalJSON(input []byte) error {
if dec.Code != nil {
a.Code = *dec.Code
}
+ if dec.CodeHash != nil {
+ a.CodeHash = dec.CodeHash
+ }
if dec.Nonce != nil {
a.Nonce = *dec.Nonce
}
diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go
index 07d5aab1c5..5d836159d1 100644
--- a/eth/tracers/native/prestate.go
+++ b/eth/tracers/native/prestate.go
@@ -44,11 +44,12 @@ func init() {
type stateMap = map[common.Address]*account
type account struct {
- Balance *big.Int `json:"balance,omitempty"`
- Code []byte `json:"code,omitempty"`
- Nonce uint64 `json:"nonce,omitempty"`
- Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
- empty bool
+ Balance *big.Int `json:"balance,omitempty"`
+ Code []byte `json:"code,omitempty"`
+ CodeHash *common.Hash `json:"codeHash,omitempty"`
+ Nonce uint64 `json:"nonce,omitempty"`
+ Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
+ empty bool
}
func (a *account) exists() bool {
@@ -251,6 +252,7 @@ func (t *prestateTracer) processDiffState() {
postAccount := &account{Storage: make(map[common.Hash]common.Hash)}
newBalance := t.env.StateDB.GetBalance(addr).ToBig()
newNonce := t.env.StateDB.GetNonce(addr)
+ newCodeHash := t.env.StateDB.GetCodeHash(addr)
if newBalance.Cmp(t.pre[addr].Balance) != 0 {
modified = true
@@ -260,6 +262,19 @@ func (t *prestateTracer) processDiffState() {
modified = true
postAccount.Nonce = newNonce
}
+ prevCodeHash := common.Hash{}
+ if t.pre[addr].CodeHash != nil {
+ prevCodeHash = *t.pre[addr].CodeHash
+ }
+ // Empty code hashes are excluded from the prestate. Normalize
+ // the empty code hash to a zero hash to make it comparable.
+ if newCodeHash == types.EmptyCodeHash {
+ newCodeHash = common.Hash{}
+ }
+ if newCodeHash != prevCodeHash {
+ modified = true
+ postAccount.CodeHash = &newCodeHash
+ }
if !t.config.DisableCode {
newCode := t.env.StateDB.GetCode(addr)
if !bytes.Equal(newCode, t.pre[addr].Code) {
@@ -309,6 +324,11 @@ func (t *prestateTracer) lookupAccount(addr common.Address) {
Nonce: t.env.StateDB.GetNonce(addr),
Code: t.env.StateDB.GetCode(addr),
}
+ codeHash := t.env.StateDB.GetCodeHash(addr)
+ // If the code is empty, we don't need to store it in the prestate.
+ if codeHash != (common.Hash{}) && codeHash != types.EmptyCodeHash {
+ acc.CodeHash = &codeHash
+ }
if !acc.exists() {
acc.empty = true
}
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index 65a96ca47a..8dabb07fec 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -199,9 +199,17 @@ func newTestBackend(t *testing.T, config *node.Config, enableHistoricalState boo
if err != nil {
return nil, nil, fmt.Errorf("can't create new ethereum service: %v", err)
}
+
+ // Ensure tx pool starts the background operation
+ txPool := ethservice.TxPool()
+ if err = txPool.Sync(); err != nil {
+ return nil, nil, fmt.Errorf("can't sync transaction pool: %v", err)
+ }
+
if enableHistoricalState { // swap to the pre-bedrock consensus-engine that we used to generate the historical blocks
ethservice.BlockChain().Engine().(*beacon.Beacon).SwapInner(ethash.NewFaker())
}
+
// Import the test chain.
if err := n.Start(); err != nil {
return nil, nil, fmt.Errorf("can't start test node: %v", err)
@@ -625,8 +633,9 @@ func testAtFunctions(t *testing.T, client *rpc.Client) {
}
// send a transaction for some interesting pending status
- // and wait for the transaction to be included in the pending block
- sendTransaction(ec)
+ if err := sendTransaction(ec); err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
// wait for the transaction to be included in the pending block
for {
diff --git a/ethclient/gethclient/gethclient.go b/ethclient/gethclient/gethclient.go
index d030878e54..54997cbf51 100644
--- a/ethclient/gethclient/gethclient.go
+++ b/ethclient/gethclient/gethclient.go
@@ -209,7 +209,7 @@ func (ec *Client) SubscribePendingTransactions(ctx context.Context, ch chan<- co
// and returns them as a JSON object.
func (ec *Client) TraceTransaction(ctx context.Context, hash common.Hash, config *tracers.TraceConfig) (any, error) {
var result any
- err := ec.c.CallContext(ctx, &result, "debug_traceTransaction", hash.Hex(), config)
+ err := ec.c.CallContext(ctx, &result, "debug_traceTransaction", hash, config)
if err != nil {
return nil, err
}
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index 6b74cfbe04..8eb624156c 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -233,6 +233,9 @@ func (db *Database) DeleteRange(start, end []byte) error {
return err
}
}
+ if err := it.Error(); err != nil {
+ return err
+ }
return batch.Write()
}
diff --git a/fork.yaml b/fork.yaml
index 2a329f7f42..f4c1d93b5f 100644
--- a/fork.yaml
+++ b/fork.yaml
@@ -5,7 +5,7 @@ footer: |
base:
name: go-ethereum
url: https://github.com/ethereum/go-ethereum
- hash: dd1ebac11757484575ee779381af32f29ce3fbe4 # v1.16.2
+ hash: d818a9af7bd5919808df78f31580f59382c53150 # v1.16.3
fork:
name: op-geth
url: https://github.com/ethereum-optimism/op-geth
@@ -199,7 +199,10 @@ def:
- title: Warn on missing hardfork data and emit additional metrics
globs:
- "core/blockchain.go"
- - title: Additional metrics
+ - title: Define additional header-based metrics
+ globs:
+ - "core/blockchain_optimism.go"
+ - title: Add hooks for additional header-chain metrics
globs:
- "core/headerchain.go"
- title: Optional Engine API extensions
@@ -250,12 +253,15 @@ def:
- title: "User API enhancements"
description: "Encode the Deposit Tx properties, the L1 costs, and daisy-chain RPC-calls for pre-Bedrock historical data"
sub:
- - title: "Receipts metadata"
+ - title: "Receipts metadata & deposit receipts"
description: |
Pre-Bedrock L1-cost receipt data is loaded from the database if available, and post-Bedrock the L1-cost
metadata is hydrated on-the-fly based on the L1 fee information in the corresponding block.
+ We also populate receipts with L1 block attributes like Operator Fee and DA Footprint parameters.
+ Furthermore, OP Stack introduces Deposit receipts, a special kind of receipts for Deposit transactions.
globs:
- "core/types/receipt.go"
+ - "core/types/receipt_opstack.go"
- "core/types/gen_receipt_json.go"
- "core/rawdb/accessors_chain.go"
- title: "API Backend"
diff --git a/go.mod b/go.mod
index 6ca88fee18..387655947c 100644
--- a/go.mod
+++ b/go.mod
@@ -15,15 +15,17 @@ require (
github.com/cloudflare/cloudflare-go v0.114.0
github.com/cockroachdb/pebble v1.1.5
github.com/compose-network/registry v0.0.0-20251019130932-f9a7f7dd297b
+ github.com/compose-network/specs/compose v0.0.0-20251101113612-6318cbcb3d22
github.com/consensys/gnark-crypto v0.18.0
github.com/crate-crypto/go-eth-kzg v1.3.0
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
+ github.com/dchest/siphash v1.2.3
github.com/deckarep/golang-set/v2 v2.6.0
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1
github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127
- github.com/ethereum/c-kzg-4844/v2 v2.1.0
+ github.com/ethereum/c-kzg-4844/v2 v2.1.5
github.com/ethereum/go-verkle v0.2.2
github.com/fatih/color v1.16.0
github.com/ferranbt/fastssz v0.1.4
@@ -31,14 +33,14 @@ require (
github.com/fsnotify/fsnotify v1.6.0
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
github.com/gofrs/flock v0.12.1
- github.com/golang-jwt/jwt/v4 v4.5.1
+ github.com/golang-jwt/jwt/v4 v4.5.2
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/gofuzz v1.2.0
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.4.2
github.com/graph-gophers/graphql-go v1.3.0
github.com/hashicorp/go-bexpr v0.1.10
- github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4
+ github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/holiman/uint256 v1.3.2
github.com/huin/goupnp v1.3.0
@@ -64,7 +66,7 @@ require (
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible
github.com/status-im/keycard-go v0.2.0
github.com/stretchr/testify v1.10.0
- github.com/supranational/blst v0.3.14
+ github.com/supranational/blst v0.3.15
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/urfave/cli/v2 v2.27.5
go.uber.org/automaxprocs v1.5.2
@@ -103,7 +105,6 @@ require (
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
- github.com/compose-network/specs/compose v0.0.0-20251101113612-6318cbcb3d22 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/deepmap/oapi-codegen v1.6.0 // indirect
github.com/dlclark/regexp2 v1.7.0 // indirect
diff --git a/go.sum b/go.sum
index 43cfac32dd..c9597db702 100644
--- a/go.sum
+++ b/go.sum
@@ -78,12 +78,6 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAK
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/compose-network/registry v0.0.0-20251019130932-f9a7f7dd297b h1:HDtykkbjvoeQxhlQFDFTvGx0PamTlJcPs3/ACrbcAD0=
github.com/compose-network/registry v0.0.0-20251019130932-f9a7f7dd297b/go.mod h1:0KvBwHpojq2/ys4k6Pnsvlrtj/uGL30/KqEKkwLuMnU=
-github.com/compose-network/specs/compose v0.0.0-20251027143611-6bad137833e3 h1:vp2cNMtR2J/I9GRXKUCM9oslW7kKQC8PF4pURduP1M8=
-github.com/compose-network/specs/compose v0.0.0-20251027143611-6bad137833e3/go.mod h1:IacGPdRsAUjMyFw3Pjl9RrhXK09yCx+ZpUzRxtnmdH4=
-github.com/compose-network/specs/compose v0.0.0-20251029171617-397ad852260e h1:47gV/Rp80nuHyyZ7p+rgbRTd2cot1lFVmbHbQDL1ZoE=
-github.com/compose-network/specs/compose v0.0.0-20251029171617-397ad852260e/go.mod h1:THydsfSfs5svwPoYU6+0y0JXZdIVUcXexc9Rh33Ow2g=
-github.com/compose-network/specs/compose v0.0.0-20251029173415-29572a998d0b h1:+nrY/mB57YxzKHNoTsCdrXyE0E6Xxp1lMYZW+dbJdyE=
-github.com/compose-network/specs/compose v0.0.0-20251029173415-29572a998d0b/go.mod h1:THydsfSfs5svwPoYU6+0y0JXZdIVUcXexc9Rh33Ow2g=
github.com/compose-network/specs/compose v0.0.0-20251101113612-6318cbcb3d22 h1:j+ZmT37ZI8S24d3Vi5UAr4Q7RapNXlGciiK1h34wOus=
github.com/compose-network/specs/compose v0.0.0-20251101113612-6318cbcb3d22/go.mod h1:THydsfSfs5svwPoYU6+0y0JXZdIVUcXexc9Rh33Ow2g=
github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0=
@@ -101,6 +95,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dchest/siphash v1.2.3 h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA=
+github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc=
github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
@@ -124,8 +120,8 @@ github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A=
github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
-github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w=
-github.com/ethereum/c-kzg-4844/v2 v2.1.0/go.mod h1:TC48kOKjJKPbN7C++qIgt0TJzZ70QznYR7Ob+WXl57E=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
+github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/ethereum/go-verkle v0.2.2 h1:I2W0WjnrFUIzzVPwm8ykY+7pL2d4VhlsePn4j7cnFk8=
github.com/ethereum/go-verkle v0.2.2/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
@@ -163,8 +159,8 @@ github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
-github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
@@ -184,8 +180,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -201,8 +197,8 @@ github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY4
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
-github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4=
-github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
+github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db h1:IZUYC/xb3giYwBLMnr8d0TGTzPKFGNTCGgGLoyeX330=
+github.com/holiman/billy v0.0.0-20250707135307-f2f9b9aae7db/go.mod h1:xTEYN9KCHxuYHs+NmrmzFcnvHMzLLNiGFafCb1n3Mfg=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
@@ -369,8 +365,8 @@ github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo=
-github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
+github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
+github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
@@ -515,8 +511,6 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
index 0f6ba10b90..ca864d5fb2 100644
--- a/graphql/graphql_test.go
+++ b/graphql/graphql_test.go
@@ -430,6 +430,40 @@ func TestWithdrawals(t *testing.T) {
}
}
+// TestGraphQLMaxDepth ensures that queries exceeding the configured maximum depth
+// are rejected to prevent resource exhaustion from deeply nested operations.
+func TestGraphQLMaxDepth(t *testing.T) {
+ stack := createNode(t)
+ defer stack.Close()
+
+ h, err := newHandler(stack, nil, nil, []string{}, []string{})
+ if err != nil {
+ t.Fatalf("could not create graphql service: %v", err)
+ }
+
+ var b strings.Builder
+ for i := 0; i < maxQueryDepth+1; i++ {
+ b.WriteString("ommers{")
+ }
+ b.WriteString("number")
+ for i := 0; i < maxQueryDepth+1; i++ {
+ b.WriteString("}")
+ }
+ query := fmt.Sprintf("{block{%s}}", b.String())
+
+ res := h.Schema.Exec(context.Background(), query, "", nil)
+ var found bool
+ for _, err := range res.Errors {
+ if err.Rule == "MaxDepthExceeded" {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatalf("expected max depth exceeded error, got %v", res.Errors)
+ }
+}
+
func createNode(t *testing.T) *node.Node {
stack, err := node.New(&node.Config{
HTTPHost: "127.0.0.1",
diff --git a/graphql/service.go b/graphql/service.go
index 421bec3cdd..f3a144a9b7 100644
--- a/graphql/service.go
+++ b/graphql/service.go
@@ -32,6 +32,9 @@ import (
gqlErrors "github.com/graph-gophers/graphql-go/errors"
)
+// maxQueryDepth limits the maximum field nesting depth allowed in GraphQL queries.
+const maxQueryDepth = 20
+
type handler struct {
Schema *graphql.Schema
}
@@ -116,7 +119,7 @@ func New(stack *node.Node, backend ethapi.Backend, filterSystem *filters.FilterS
func newHandler(stack *node.Node, backend ethapi.Backend, filterSystem *filters.FilterSystem, cors, vhosts []string) (*handler, error) {
q := Resolver{backend, filterSystem}
- s, err := graphql.ParseSchema(schema, &q)
+ s, err := graphql.ParseSchema(schema, &q, graphql.MaxDepth(maxQueryDepth))
if err != nil {
return nil, err
}
diff --git a/internal/build/file.go b/internal/build/file.go
index 2d8c993f36..7490af281e 100644
--- a/internal/build/file.go
+++ b/internal/build/file.go
@@ -34,23 +34,6 @@ func FileExist(path string) bool {
return true
}
-// HashFiles iterates the provided set of files, computing the hash of each.
-func HashFiles(files []string) (map[string][32]byte, error) {
- res := make(map[string][32]byte)
- for _, filePath := range files {
- f, err := os.OpenFile(filePath, os.O_RDONLY, 0666)
- if err != nil {
- return nil, err
- }
- hasher := sha256.New()
- if _, err := io.Copy(hasher, f); err != nil {
- return nil, err
- }
- res[filePath] = [32]byte(hasher.Sum(nil))
- }
- return res, nil
-}
-
// HashFolder iterates all files under the given directory, computing the hash
// of each.
func HashFolder(folder string, exlude []string) (map[string][32]byte, error) {
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 89183678e5..982b4c7d42 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -38,6 +38,7 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -685,19 +686,30 @@ func headerByNumberOrHash(ctx context.Context, b Backend, blockNrOrHash rpc.Bloc
// GetBlockReceipts returns the block receipts for the given block hash or number or tag.
func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]map[string]interface{}, error) {
- block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash)
- if block == nil || err != nil {
- return nil, err
- }
- receipts, err := api.b.GetReceipts(ctx, block.Hash())
- if err != nil {
- return nil, err
+ var (
+ err error
+ block *types.Block
+ receipts types.Receipts
+ )
+ if blockNr, ok := blockNrOrHash.Number(); ok && blockNr == rpc.PendingBlockNumber {
+ block, receipts, _ = api.b.Pending()
+ if block == nil {
+ return nil, errors.New("pending receipts is not available")
+ }
+ } else {
+ block, err = api.b.BlockByNumberOrHash(ctx, blockNrOrHash)
+ if block == nil || err != nil {
+ return nil, err
+ }
+ receipts, err = api.b.GetReceipts(ctx, block.Hash())
+ if err != nil {
+ return nil, err
+ }
}
txs := block.Transactions()
if len(txs) != len(receipts) {
return nil, fmt.Errorf("receipts length mismatch: %d vs %d", len(txs), len(receipts))
}
-
// Derive the sender.
signer := types.MakeSigner(api.b.ChainConfig(), block.Number(), block.Time())
@@ -705,7 +717,6 @@ func (api *BlockChainAPI) GetBlockReceipts(ctx context.Context, blockNrOrHash rp
for i, receipt := range receipts {
result[i] = marshalReceipt(receipt, block.Hash(), block.NumberU64(), signer, txs[i], i, api.b.ChainConfig())
}
-
return result, nil
}
@@ -926,7 +937,15 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr
if state == nil || err != nil {
return 0, err
}
- if err := overrides.Apply(state, nil); err != nil {
+ blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil, b.ChainConfig(), state)
+ if blockOverrides != nil {
+ if err := blockOverrides.Apply(&blockCtx); err != nil {
+ return 0, err
+ }
+ }
+ rules := b.ChainConfig().Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time)
+ precompiles := vm.ActivePrecompiledContracts(rules)
+ if err := overrides.Apply(state, precompiles); err != nil {
return 0, err
}
// Construct the gas estimator option from the user input
@@ -1323,6 +1342,71 @@ func (api *BlockChainAPI) CreateAccessList(ctx context.Context, args Transaction
return result, nil
}
+type config struct {
+ ActivationTime uint64 `json:"activationTime"`
+ BlobSchedule *params.BlobConfig `json:"blobSchedule"`
+ ChainId *hexutil.Big `json:"chainId"`
+ ForkId hexutil.Bytes `json:"forkId"`
+ Precompiles map[string]common.Address `json:"precompiles"`
+ SystemContracts map[string]common.Address `json:"systemContracts"`
+}
+
+type configResponse struct {
+ Current *config `json:"current"`
+ Next *config `json:"next"`
+ Last *config `json:"last"`
+}
+
+// Config implements the EIP-7910 eth_config method.
+func (api *BlockChainAPI) Config(ctx context.Context) (*configResponse, error) {
+ genesis, err := api.b.HeaderByNumber(ctx, 0)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load genesis: %w", err)
+ }
+ assemble := func(c *params.ChainConfig, ts *uint64) *config {
+ if ts == nil {
+ return nil
+ }
+ t := *ts
+
+ var (
+ rules = c.Rules(c.LondonBlock, true, t)
+ precompiles = make(map[string]common.Address)
+ )
+ for addr, c := range vm.ActivePrecompiledContracts(rules) {
+ precompiles[c.Name()] = addr
+ }
+ // Activation time is required. If a fork is activated at genesis, the value 0 is used.
+ activationTime := t
+ if genesis.Time >= t {
+ activationTime = 0
+ }
+ forkid := forkid.NewID(c, types.NewBlockWithHeader(genesis), ^uint64(0), t).Hash
+ return &config{
+ ActivationTime: activationTime,
+ BlobSchedule: c.BlobConfig(c.LatestFork(t)),
+ ChainId: (*hexutil.Big)(c.ChainID),
+ ForkId: forkid[:],
+ Precompiles: precompiles,
+ SystemContracts: c.ActiveSystemContracts(t),
+ }
+ }
+ var (
+ c = api.b.ChainConfig()
+ t = api.b.CurrentHeader().Time
+ )
+ resp := configResponse{
+ Next: assemble(c, c.Timestamp(c.LatestFork(t)+1)),
+ Current: assemble(c, c.Timestamp(c.LatestFork(t))),
+ Last: assemble(c, c.Timestamp(c.LatestFork(^uint64(0)))),
+ }
+ // Nil out last if no future-fork is configured.
+ if resp.Next == nil {
+ resp.Last = nil
+ }
+ return &resp, nil
+}
+
// AccessList creates an access list for the given transaction.
// If the accesslist creation fails an error is returned.
// If the transaction itself fails, an vmErr is returned.
@@ -1629,7 +1713,7 @@ func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber u
"effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice),
}
- if chainConfig.Optimism != nil && !tx.IsDepositTx() {
+ if chainConfig.IsOptimism() && !tx.IsDepositTx() {
fields["l1GasPrice"] = (*hexutil.Big)(receipt.L1GasPrice)
fields["l1GasUsed"] = (*hexutil.Big)(receipt.L1GasUsed)
fields["l1Fee"] = (*hexutil.Big)(receipt.L1Fee)
@@ -1654,8 +1738,14 @@ func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber u
if receipt.OperatorFeeConstant != nil {
fields["operatorFeeConstant"] = hexutil.Uint64(*receipt.OperatorFeeConstant)
}
+ // Fields added in Jovian
+ if receipt.DAFootprintGasScalar != nil {
+ fields["daFootprintGasScalar"] = hexutil.Uint64(*receipt.DAFootprintGasScalar)
+ // Jovian repurposes blobGasUsed for DA footprint gas used
+ fields["blobGasUsed"] = hexutil.Uint64(receipt.BlobGasUsed)
+ }
}
- if chainConfig.Optimism != nil && tx.IsDepositTx() && receipt.DepositNonce != nil {
+ if chainConfig.IsOptimism() && tx.IsDepositTx() && receipt.DepositNonce != nil {
fields["depositNonce"] = hexutil.Uint64(*receipt.DepositNonce)
if receipt.DepositReceiptVersion != nil {
fields["depositReceiptVersion"] = hexutil.Uint64(*receipt.DepositReceiptVersion)
@@ -1730,6 +1820,8 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
// SendTransaction creates a transaction for the given argument, sign it and submit it to the
// transaction pool.
+//
+// This API is not capable of submitting blob transactions with a sidecar.
func (api *TransactionAPI) SendTransaction(ctx context.Context, args TransactionArgs) (common.Hash, error) {
// Look up the wallet containing the requested signer
account := accounts.Account{Address: args.from()}
@@ -1750,7 +1842,7 @@ func (api *TransactionAPI) SendTransaction(ctx context.Context, args Transaction
}
// Set some sanity defaults and terminate on failure
- if err := args.setDefaults(ctx, api.b, false); err != nil {
+ if err := args.setDefaults(ctx, api.b, sidecarConfig{}); err != nil {
return common.Hash{}, err
}
// Assemble the transaction and sign with the wallet
@@ -1785,10 +1877,19 @@ func (api *TransactionAPI) SendXTransaction(ctx context.Context, input hexutil.B
// on a given unsigned transaction, and returns it to the caller for further
// processing (signing + broadcast).
func (api *TransactionAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
- args.blobSidecarAllowed = true
-
// Set some sanity defaults and terminate on failure
- if err := args.setDefaults(ctx, api.b, false); err != nil {
+ sidecarVersion := types.BlobSidecarVersion0
+ if len(args.Blobs) > 0 {
+ h := api.b.CurrentHeader()
+ if api.b.ChainConfig().IsOsaka(h.Number, h.Time) {
+ sidecarVersion = types.BlobSidecarVersion1
+ }
+ }
+ config := sidecarConfig{
+ blobSidecarAllowed: true,
+ blobSidecarVersion: sidecarVersion,
+ }
+ if err := args.setDefaults(ctx, api.b, config); err != nil {
return nil, err
}
// Assemble the transaction and obtain rlp
@@ -1845,8 +1946,6 @@ type SignTransactionResult struct {
// The node needs to have the private key of the account corresponding with
// the given from address and it needs to be unlocked.
func (api *TransactionAPI) SignTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
- args.blobSidecarAllowed = true
-
if args.Gas == nil {
return nil, errors.New("gas not specified")
}
@@ -1859,7 +1958,19 @@ func (api *TransactionAPI) SignTransaction(ctx context.Context, args Transaction
if args.Nonce == nil {
return nil, errors.New("nonce not specified")
}
- if err := args.setDefaults(ctx, api.b, false); err != nil {
+ sidecarVersion := types.BlobSidecarVersion0
+ if len(args.Blobs) > 0 {
+ h := api.b.CurrentHeader()
+ if api.b.ChainConfig().IsOsaka(h.Number, h.Time) {
+ sidecarVersion = types.BlobSidecarVersion1
+ }
+ }
+
+ config := sidecarConfig{
+ blobSidecarAllowed: true,
+ blobSidecarVersion: sidecarVersion,
+ }
+ if err := args.setDefaults(ctx, api.b, config); err != nil {
return nil, err
}
// Before actually sign the transaction, ensure the transaction fee is reasonable.
@@ -1875,7 +1986,7 @@ func (api *TransactionAPI) SignTransaction(ctx context.Context, args Transaction
// no longer retains the blobs, only the blob hashes. In this step, we need
// to put back the blob(s).
if args.IsEIP4844() {
- signed = signed.WithBlobTxSidecar(types.NewBlobTxSidecar(types.BlobSidecarVersion0, args.Blobs, args.Commitments, args.Proofs))
+ signed = signed.WithBlobTxSidecar(types.NewBlobTxSidecar(sidecarVersion, args.Blobs, args.Commitments, args.Proofs))
}
data, err := signed.MarshalBinary()
if err != nil {
@@ -1910,11 +2021,13 @@ func (api *TransactionAPI) PendingTransactions() ([]*RPCTransaction, error) {
// Resend accepts an existing transaction and a new gas price and limit. It will remove
// the given transaction from the pool and reinsert it with the new gas price and limit.
+//
+// This API is not capable of submitting blob transactions with a sidecar.
func (api *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs, gasPrice *hexutil.Big, gasLimit *hexutil.Uint64) (common.Hash, error) {
if sendArgs.Nonce == nil {
return common.Hash{}, errors.New("missing transaction nonce in transaction spec")
}
- if err := sendArgs.setDefaults(ctx, api.b, false); err != nil {
+ if err := sendArgs.setDefaults(ctx, api.b, sidecarConfig{}); err != nil {
return common.Hash{}, err
}
matchTx := sendArgs.ToTransaction(types.LegacyTxType)
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index 5cb325ec12..4dcea1523c 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -24,7 +24,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "github.com/ethereum/go-ethereum/internal/xproto/rollup/v1"
"math"
"math/big"
"os"
@@ -35,12 +34,10 @@ import (
"testing"
"time"
- "github.com/ethereum/go-ethereum/accounts/abi"
- "github.com/ethereum/go-ethereum/core/ssv"
- "github.com/ethereum/go-ethereum/internal/ethapi/override"
-
+ "github.com/compose-network/specs/compose/proto"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
+ "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -50,6 +47,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/filtermaps"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/ssv"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -58,6 +56,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/blocktest"
+ "github.com/ethereum/go-ethereum/internal/ethapi/override"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
@@ -594,14 +593,16 @@ func newTestAccountManager(t *testing.T) (*accounts.Manager, accounts.Account) {
}
type testBackend struct {
- db ethdb.Database
- chain *core.BlockChain
- pending *types.Block
- accman *accounts.Manager
- acc accounts.Account
+ db ethdb.Database
+ chain *core.BlockChain
+ accman *accounts.Manager
+ acc accounts.Account
+
+ pending *types.Block
+ pendingReceipts types.Receipts
}
-func (b *testBackend) HandleSPMessage(ctx context.Context, msg *rollupv1.Message) ([]common.Hash, error) {
+func (b *testBackend) HandleSPMessage(context.Context, *proto.Message) ([]common.Hash, error) {
//TODO implement me
panic("implement me")
}
@@ -624,24 +625,26 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E
gspec.Alloc[acc.Address] = types.Account{Balance: big.NewInt(params.Ether)}
// Generate blocks for testing
- db, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, n, generator)
+ db, blocks, receipts := core.GenerateChainWithGenesis(gspec, engine, n+1, generator)
chain, err := core.NewBlockChain(db, gspec, engine, options)
if err != nil {
t.Fatalf("failed to create tester chain: %v", err)
}
- if n, err := chain.InsertChain(blocks); err != nil {
+ if n, err := chain.InsertChain(blocks[:n]); err != nil {
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
}
-
- backend := &testBackend{db: db, chain: chain, accman: accman, acc: acc}
+ backend := &testBackend{
+ db: db,
+ chain: chain,
+ accman: accman,
+ acc: acc,
+ pending: blocks[n],
+ pendingReceipts: receipts[n],
+ }
return backend
}
-func (b *testBackend) setPendingBlock(block *types.Block) {
- b.pending = block
-}
-
func (b testBackend) SyncProgress(ctx context.Context) ethereum.SyncProgress {
return ethereum.SyncProgress{}
}
@@ -733,7 +736,13 @@ func (b testBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOr
}
panic("only implemented for number")
}
-func (b testBackend) Pending() (*types.Block, types.Receipts, *state.StateDB) { panic("implement me") }
+func (b testBackend) Pending() (*types.Block, types.Receipts, *state.StateDB) {
+ block := b.pending
+ if block == nil {
+ return nil, nil, nil
+ }
+ return block, b.pendingReceipts, nil
+}
func (b testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
header, err := b.HeaderByHash(ctx, hash)
if header == nil || err != nil {
@@ -2847,19 +2856,53 @@ func TestSendBlobTransaction(t *testing.T) {
func TestFillBlobTransaction(t *testing.T) {
t.Parallel()
+
+ testFillBlobTransaction(t, false)
+ testFillBlobTransaction(t, true)
+}
+
+func testFillBlobTransaction(t *testing.T, osaka bool) {
// Initialize test accounts
+ config := *params.MergedTestChainConfig
+ if !osaka {
+ config.OsakaTime = nil
+ }
var (
key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
to = crypto.PubkeyToAddress(key.PublicKey)
genesis = &core.Genesis{
- Config: params.MergedTestChainConfig,
+ Config: &config,
Alloc: types.GenesisAlloc{},
}
- emptyBlob = new(kzg4844.Blob)
- emptyBlobs = []kzg4844.Blob{*emptyBlob}
- emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
- emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
- emptyBlobHash common.Hash = kzg4844.CalcBlobHashV1(sha256.New(), &emptyBlobCommit)
+ emptyBlob = new(kzg4844.Blob)
+ emptyBlobs = []kzg4844.Blob{*emptyBlob}
+ emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
+ emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
+ emptyBlobCellProofs, _ = kzg4844.ComputeCellProofs(emptyBlob)
+ emptyBlobHash common.Hash = kzg4844.CalcBlobHashV1(sha256.New(), &emptyBlobCommit)
+
+ fillEmptyKZGProofs = func(blobs int) []kzg4844.Proof {
+ if osaka {
+ return make([]kzg4844.Proof, blobs*kzg4844.CellProofsPerBlob)
+ }
+ return make([]kzg4844.Proof, blobs)
+ }
+ expectSidecar = func() *types.BlobTxSidecar {
+ if osaka {
+ return types.NewBlobTxSidecar(
+ types.BlobSidecarVersion1,
+ emptyBlobs,
+ []kzg4844.Commitment{emptyBlobCommit},
+ emptyBlobCellProofs,
+ )
+ }
+ return types.NewBlobTxSidecar(
+ types.BlobSidecarVersion0,
+ emptyBlobs,
+ []kzg4844.Commitment{emptyBlobCommit},
+ []kzg4844.Proof{emptyBlobProof},
+ )
+ }
)
b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
b.SetPoS()
@@ -2919,7 +2962,7 @@ func TestFillBlobTransaction(t *testing.T) {
Commitments: []kzg4844.Commitment{{}, {}},
Proofs: []kzg4844.Proof{{}},
},
- err: `number of blobs and proofs mismatch (have=1, want=2)`,
+ err: fmt.Sprintf(`number of blobs and proofs mismatch (have=1, want=%d)`, len(fillEmptyKZGProofs(2))),
},
{
name: "TestInvalidProofVerification",
@@ -2929,7 +2972,7 @@ func TestFillBlobTransaction(t *testing.T) {
Value: (*hexutil.Big)(big.NewInt(1)),
Blobs: []kzg4844.Blob{{}, {}},
Commitments: []kzg4844.Commitment{{}, {}},
- Proofs: []kzg4844.Proof{{}, {}},
+ Proofs: fillEmptyKZGProofs(2),
},
err: `failed to verify blob proof: short buffer`,
},
@@ -2945,7 +2988,7 @@ func TestFillBlobTransaction(t *testing.T) {
},
want: &result{
Hashes: []common.Hash{emptyBlobHash},
- Sidecar: types.NewBlobTxSidecar(types.BlobSidecarVersion0, emptyBlobs, []kzg4844.Commitment{emptyBlobCommit}, []kzg4844.Proof{emptyBlobProof}),
+ Sidecar: expectSidecar(),
},
},
{
@@ -2961,7 +3004,7 @@ func TestFillBlobTransaction(t *testing.T) {
},
want: &result{
Hashes: []common.Hash{emptyBlobHash},
- Sidecar: types.NewBlobTxSidecar(types.BlobSidecarVersion0, emptyBlobs, []kzg4844.Commitment{emptyBlobCommit}, []kzg4844.Proof{emptyBlobProof}),
+ Sidecar: expectSidecar(),
},
},
{
@@ -2987,7 +3030,7 @@ func TestFillBlobTransaction(t *testing.T) {
},
want: &result{
Hashes: []common.Hash{emptyBlobHash},
- Sidecar: types.NewBlobTxSidecar(types.BlobSidecarVersion0, emptyBlobs, []kzg4844.Commitment{emptyBlobCommit}, []kzg4844.Proof{emptyBlobProof}),
+ Sidecar: expectSidecar(),
},
},
}
@@ -3331,21 +3374,6 @@ func TestRPCGetBlockOrHeader(t *testing.T) {
}
genBlocks = 10
signer = types.HomesteadSigner{}
- tx = types.NewTx(&types.LegacyTx{
- Nonce: 11,
- GasPrice: big.NewInt(11111),
- Gas: 1111,
- To: &acc2Addr,
- Value: big.NewInt(111),
- Data: []byte{0x11, 0x11, 0x11},
- })
- withdrawal = &types.Withdrawal{
- Index: 0,
- Validator: 1,
- Address: common.Address{0x12, 0x34},
- Amount: 10,
- }
- pending = types.NewBlock(&types.Header{Number: big.NewInt(11), Time: 42}, &types.Body{Transactions: types.Transactions{tx}, Withdrawals: types.Withdrawals{withdrawal}}, nil, blocktest.NewHasher(), types.DefaultBlockConfig)
)
backend := newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) {
// Transfer from account[0] to account[1]
@@ -3354,7 +3382,6 @@ func TestRPCGetBlockOrHeader(t *testing.T) {
tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, acc1Key)
b.AddTx(tx)
})
- backend.setPendingBlock(pending)
api := NewBlockChainAPI(backend)
blockHashes := make([]common.Hash, genBlocks+1)
ctx := context.Background()
@@ -3365,7 +3392,7 @@ func TestRPCGetBlockOrHeader(t *testing.T) {
}
blockHashes[i] = header.Hash()
}
- pendingHash := pending.Hash()
+ pendingHash := backend.pending.Hash()
var testSuite = []struct {
blockNumber rpc.BlockNumber
@@ -3596,7 +3623,7 @@ func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Ha
},
}
signer = types.LatestSignerForChainID(params.TestChainConfig.ChainID)
- txHashes = make([]common.Hash, genBlocks)
+ txHashes = make([]common.Hash, 0, genBlocks)
)
backend := newTestBackend(t, genBlocks, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
@@ -3606,9 +3633,6 @@ func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Ha
)
b.SetPoS()
switch i {
- case 0:
- // transfer 1000wei
- tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), types.HomesteadSigner{}, acc1Key)
case 1:
// create contract
tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: nil, Gas: 53100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040")}), signer, acc1Key)
@@ -3645,13 +3669,16 @@ func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Ha
BlobHashes: []common.Hash{{1}},
Value: new(uint256.Int),
}), signer, acc1Key)
+ default:
+ // transfer 1000wei
+ tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), types.HomesteadSigner{}, acc1Key)
}
if err != nil {
t.Errorf("failed to sign tx: %v", err)
}
if tx != nil {
b.AddTx(tx)
- txHashes[i] = tx.Hash()
+ txHashes = append(txHashes, tx.Hash())
}
})
return backend, txHashes
@@ -3767,6 +3794,11 @@ func TestRPCGetBlockReceipts(t *testing.T) {
test: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber),
file: "tag-latest",
},
+ // 3. pending tag
+ {
+ test: rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber),
+ file: "tag-pending",
+ },
// 4. block with legacy transfer tx(hash)
{
test: rpc.BlockNumberOrHashWithHash(blockHashes[1], false),
@@ -3916,3 +3948,130 @@ func TestCreateAccessListWithStateOverrides(t *testing.T) {
}}
require.Equal(t, expected, result.Accesslist)
}
+
+func TestEstimateGasWithMovePrecompile(t *testing.T) {
+ t.Parallel()
+ // Initialize test accounts
+ var (
+ accounts = newAccounts(2)
+ genesis = &core.Genesis{
+ Config: params.MergedTestChainConfig,
+ Alloc: types.GenesisAlloc{
+ accounts[0].addr: {Balance: big.NewInt(params.Ether)},
+ },
+ }
+ )
+ backend := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
+ b.SetPoS()
+ })
+ api := NewBlockChainAPI(backend)
+ // Move SHA256 precompile (0x2) to a new address (0x100)
+ // and estimate gas for calling the moved precompile.
+ var (
+ sha256Addr = common.BytesToAddress([]byte{0x2})
+ newSha256Addr = common.BytesToAddress([]byte{0x10, 0})
+ sha256Input = hexutil.Bytes([]byte("hello"))
+ args = TransactionArgs{
+ From: &accounts[0].addr,
+ To: &newSha256Addr,
+ Data: &sha256Input,
+ }
+ overrides = &override.StateOverride{
+ sha256Addr: override.OverrideAccount{
+ MovePrecompileTo: &newSha256Addr,
+ },
+ }
+ )
+ gas, err := api.EstimateGas(context.Background(), args, nil, overrides, nil)
+ if err != nil {
+ t.Fatalf("EstimateGas failed: %v", err)
+ }
+ if gas != 21366 {
+ t.Fatalf("mismatched gas: %d, want 21366", gas)
+ }
+}
+
+func TestEIP7910Config(t *testing.T) {
+ var (
+ newUint64 = func(val uint64) *uint64 { return &val }
+ // Define a snapshot of the current Hoodi config (only Prague scheduled) so that future forks do not
+ // cause this test to fail.
+ config = ¶ms.ChainConfig{
+ ChainID: big.NewInt(560048),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ DAOForkSupport: true,
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: nil,
+ GrayGlacierBlock: nil,
+ TerminalTotalDifficulty: big.NewInt(0),
+ MergeNetsplitBlock: big.NewInt(0),
+ ShanghaiTime: newUint64(0),
+ CancunTime: newUint64(0),
+ PragueTime: newUint64(1742999832),
+ DepositContractAddress: common.HexToAddress("0x00000000219ab540356cBB839Cbe05303d7705Fa"),
+ Ethash: new(params.EthashConfig),
+ BlobScheduleConfig: ¶ms.BlobScheduleConfig{
+ Cancun: params.DefaultCancunBlobConfig,
+ Prague: params.DefaultPragueBlobConfig,
+ },
+ }
+ )
+ gspec := core.DefaultHoodiGenesisBlock()
+ gspec.Config = config
+
+ var testSuite = []struct {
+ time uint64
+ file string
+ }{
+ {
+ time: 0,
+ file: "next-and-last",
+ },
+ {
+ time: *gspec.Config.PragueTime,
+ file: "current",
+ },
+ }
+
+ for i, tt := range testSuite {
+ backend := configTimeBackend{nil, gspec, tt.time}
+ api := NewBlockChainAPI(backend)
+ result, err := api.Config(context.Background())
+ if err != nil {
+ t.Errorf("test %d: want no error, have %v", i, err)
+ continue
+ }
+ testRPCResponseWithFile(t, i, result, "eth_config", tt.file)
+ }
+}
+
+type configTimeBackend struct {
+ *testBackend
+ genesis *core.Genesis
+ time uint64
+}
+
+func (b configTimeBackend) ChainConfig() *params.ChainConfig {
+ return b.genesis.Config
+}
+
+func (b configTimeBackend) HeaderByNumber(_ context.Context, n rpc.BlockNumber) (*types.Header, error) {
+ if n == 0 {
+ return b.genesis.ToBlock().Header(), nil
+ }
+ panic("not implemented")
+}
+
+func (b configTimeBackend) CurrentHeader() *types.Header {
+ return &types.Header{Time: b.time}
+}
diff --git a/internal/ethapi/override/override_test.go b/internal/ethapi/override/override_test.go
index 02a17c1331..6feafaac75 100644
--- a/internal/ethapi/override/override_test.go
+++ b/internal/ethapi/override/override_test.go
@@ -35,6 +35,10 @@ func (p *precompileContract) RequiredGas(input []byte) uint64 { return 0 }
func (p *precompileContract) Run(input []byte) ([]byte, error) { return nil, nil }
+func (p *precompileContract) Name() string {
+ panic("implement me")
+}
+
func TestStateOverrideMovePrecompile(t *testing.T) {
db := state.NewDatabase(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil), nil)
statedb, err := state.New(types.EmptyRootHash, db)
diff --git a/internal/ethapi/testdata/eth_config-current.json b/internal/ethapi/testdata/eth_config-current.json
new file mode 100644
index 0000000000..0597c23e39
--- /dev/null
+++ b/internal/ethapi/testdata/eth_config-current.json
@@ -0,0 +1,40 @@
+{
+ "current": {
+ "activationTime": 1742999832,
+ "blobSchedule": {
+ "target": 6,
+ "max": 9,
+ "baseFeeUpdateFraction": 5007716
+ },
+ "chainId": "0x88bb0",
+ "forkId": "0x0929e24e",
+ "precompiles": {
+ "BLAKE2F": "0x0000000000000000000000000000000000000009",
+ "BLS12_G1ADD": "0x000000000000000000000000000000000000000b",
+ "BLS12_G1MSM": "0x000000000000000000000000000000000000000c",
+ "BLS12_G2ADD": "0x000000000000000000000000000000000000000d",
+ "BLS12_G2MSM": "0x000000000000000000000000000000000000000e",
+ "BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011",
+ "BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010",
+ "BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f",
+ "BN254_ADD": "0x0000000000000000000000000000000000000006",
+ "BN254_MUL": "0x0000000000000000000000000000000000000007",
+ "BN254_PAIRING": "0x0000000000000000000000000000000000000008",
+ "ECREC": "0x0000000000000000000000000000000000000001",
+ "ID": "0x0000000000000000000000000000000000000004",
+ "KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
+ "MODEXP": "0x0000000000000000000000000000000000000005",
+ "RIPEMD160": "0x0000000000000000000000000000000000000003",
+ "SHA256": "0x0000000000000000000000000000000000000002"
+ },
+ "systemContracts": {
+ "BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02",
+ "CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS": "0x0000bbddc7ce488642fb579f8b00f3a590007251",
+ "DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa",
+ "HISTORY_STORAGE_ADDRESS": "0x0000f90827f1c53a10cb7a02335b175320002935",
+ "WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS": "0x00000961ef480eb55e80d19ad83579a64c007002"
+ }
+ },
+ "next": null,
+ "last": null
+}
diff --git a/internal/ethapi/testdata/eth_config-next-and-last.json b/internal/ethapi/testdata/eth_config-next-and-last.json
new file mode 100644
index 0000000000..81869ba174
--- /dev/null
+++ b/internal/ethapi/testdata/eth_config-next-and-last.json
@@ -0,0 +1,99 @@
+{
+ "current": {
+ "activationTime": 0,
+ "blobSchedule": {
+ "baseFeeUpdateFraction": 3338477,
+ "max": 6,
+ "target": 3
+ },
+ "chainId": "0x88bb0",
+ "forkId": "0xbef71d30",
+ "precompiles": {
+ "BLAKE2F": "0x0000000000000000000000000000000000000009",
+ "BN254_ADD": "0x0000000000000000000000000000000000000006",
+ "BN254_MUL": "0x0000000000000000000000000000000000000007",
+ "BN254_PAIRING": "0x0000000000000000000000000000000000000008",
+ "ECREC": "0x0000000000000000000000000000000000000001",
+ "ID": "0x0000000000000000000000000000000000000004",
+ "KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
+ "MODEXP": "0x0000000000000000000000000000000000000005",
+ "RIPEMD160": "0x0000000000000000000000000000000000000003",
+ "SHA256": "0x0000000000000000000000000000000000000002"
+ },
+ "systemContracts": {
+ "BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02"
+ }
+ },
+ "next": {
+ "activationTime": 1742999832,
+ "blobSchedule": {
+ "baseFeeUpdateFraction": 5007716,
+ "max": 9,
+ "target": 6
+ },
+ "chainId": "0x88bb0",
+ "forkId": "0x0929e24e",
+ "precompiles": {
+ "BLAKE2F": "0x0000000000000000000000000000000000000009",
+ "BLS12_G1ADD": "0x000000000000000000000000000000000000000b",
+ "BLS12_G1MSM": "0x000000000000000000000000000000000000000c",
+ "BLS12_G2ADD": "0x000000000000000000000000000000000000000d",
+ "BLS12_G2MSM": "0x000000000000000000000000000000000000000e",
+ "BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011",
+ "BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010",
+ "BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f",
+ "BN254_ADD": "0x0000000000000000000000000000000000000006",
+ "BN254_MUL": "0x0000000000000000000000000000000000000007",
+ "BN254_PAIRING": "0x0000000000000000000000000000000000000008",
+ "ECREC": "0x0000000000000000000000000000000000000001",
+ "ID": "0x0000000000000000000000000000000000000004",
+ "KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
+ "MODEXP": "0x0000000000000000000000000000000000000005",
+ "RIPEMD160": "0x0000000000000000000000000000000000000003",
+ "SHA256": "0x0000000000000000000000000000000000000002"
+ },
+ "systemContracts": {
+ "BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02",
+ "CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS": "0x0000bbddc7ce488642fb579f8b00f3a590007251",
+ "DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa",
+ "HISTORY_STORAGE_ADDRESS": "0x0000f90827f1c53a10cb7a02335b175320002935",
+ "WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS": "0x00000961ef480eb55e80d19ad83579a64c007002"
+ }
+ },
+ "last": {
+ "activationTime": 1742999832,
+ "blobSchedule": {
+ "baseFeeUpdateFraction": 5007716,
+ "max": 9,
+ "target": 6
+ },
+ "chainId": "0x88bb0",
+ "forkId": "0x0929e24e",
+ "precompiles": {
+ "BLAKE2F": "0x0000000000000000000000000000000000000009",
+ "BLS12_G1ADD": "0x000000000000000000000000000000000000000b",
+ "BLS12_G1MSM": "0x000000000000000000000000000000000000000c",
+ "BLS12_G2ADD": "0x000000000000000000000000000000000000000d",
+ "BLS12_G2MSM": "0x000000000000000000000000000000000000000e",
+ "BLS12_MAP_FP2_TO_G2": "0x0000000000000000000000000000000000000011",
+ "BLS12_MAP_FP_TO_G1": "0x0000000000000000000000000000000000000010",
+ "BLS12_PAIRING_CHECK": "0x000000000000000000000000000000000000000f",
+ "BN254_ADD": "0x0000000000000000000000000000000000000006",
+ "BN254_MUL": "0x0000000000000000000000000000000000000007",
+ "BN254_PAIRING": "0x0000000000000000000000000000000000000008",
+ "ECREC": "0x0000000000000000000000000000000000000001",
+ "ID": "0x0000000000000000000000000000000000000004",
+ "KZG_POINT_EVALUATION": "0x000000000000000000000000000000000000000a",
+ "MODEXP": "0x0000000000000000000000000000000000000005",
+ "RIPEMD160": "0x0000000000000000000000000000000000000003",
+ "SHA256": "0x0000000000000000000000000000000000000002"
+ },
+ "systemContracts": {
+ "BEACON_ROOTS_ADDRESS": "0x000f3df6d732807ef1319fb7b8bb8522d0beac02",
+ "CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS": "0x0000bbddc7ce488642fb579f8b00f3a590007251",
+ "DEPOSIT_CONTRACT_ADDRESS": "0x00000000219ab540356cbb839cbe05303d7705fa",
+ "HISTORY_STORAGE_ADDRESS": "0x0000f90827f1c53a10cb7a02335b175320002935",
+ "WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS": "0x00000961ef480eb55e80d19ad83579a64c007002"
+ }
+ }
+}
diff --git a/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json b/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json
index 56de19c297..6cbd783e66 100644
--- a/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json
+++ b/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending-fullTx.json
@@ -1,49 +1,40 @@
{
- "difficulty": "0x0",
+ "baseFeePerGas": "0xde56ab3",
+ "difficulty": "0x20000",
"extraData": "0x",
- "gasLimit": "0x0",
- "gasUsed": "0x0",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x5208",
"hash": null,
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"miner": null,
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": null,
"number": "0xb",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "parentHash": "0xa063415a5020f1569fae73ecb0d37bc5649ebe86d59e764a389eb37814bd42cb",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x256",
- "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "timestamp": "0x2a",
+ "size": "0x26a",
+ "stateRoot": "0xce0e05397e548614a5b93254662174329466f8f4b1b391eb36fec9a7a591e58e",
+ "timestamp": "0x6e",
"transactions": [
{
- "blockHash": "0x6cebd9f966ea686f44b981685e3f0eacea28591a7a86d7fbbe521a86e9f81165",
+ "blockHash": "0xfda6c7cb7a3a712e0c424909a7724cab0448e89e286617fa8d5fd27f63f28bd2",
"blockNumber": "0xb",
- "from": "0x0000000000000000000000000000000000000000",
- "gas": "0x457",
- "gasPrice": "0x2b67",
- "hash": "0x4afee081df5dff7a025964032871f7d4ba4d21baf5f6376a2f4a9f79fc506298",
- "input": "0x111111",
- "nonce": "0xb",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gas": "0x5208",
+ "gasPrice": "0xde56ab3",
+ "hash": "0xd773fbb47ec87b1a958ac16430943ddf2797ecae2b33fe7b16ddb334e30325ed",
+ "input": "0x",
+ "nonce": "0xa",
"to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
"transactionIndex": "0x0",
- "value": "0x6f",
+ "value": "0x3e8",
"type": "0x0",
- "chainId": "0x1",
- "v": "0x0",
- "r": "0x0",
- "s": "0x0"
+ "v": "0x1c",
+ "r": "0xfa029dacd66238d20cd649fe3b323bb458d2cfa4af7db0ff4f6b3e1039bc320a",
+ "s": "0x52fb4d45c1d623f2f05508bae063a4728761d762ae45b8b0908ffea546f3d95e"
}
],
- "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37",
- "uncles": [],
- "withdrawals": [
- {
- "index": "0x0",
- "validatorIndex": "0x1",
- "address": "0x1234000000000000000000000000000000000000",
- "amount": "0xa"
- }
- ],
- "withdrawalsRoot": "0x73d756269cdfc22e7e17a3548e36f42f750ca06d7e3cd98d1b6d0eb5add9dc84"
-}
\ No newline at end of file
+ "transactionsRoot": "0x59abb8ec0655f66e66450d1502618bc64022ae2d2950fa471eec6e8da2846264",
+ "uncles": []
+}
diff --git a/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending.json b/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending.json
index dda2d93213..3254482cd9 100644
--- a/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending.json
+++ b/internal/ethapi/testdata/eth_getBlockByNumber-tag-pending.json
@@ -1,32 +1,24 @@
{
- "difficulty": "0x0",
+ "baseFeePerGas": "0xde56ab3",
+ "difficulty": "0x20000",
"extraData": "0x",
- "gasLimit": "0x0",
- "gasUsed": "0x0",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x5208",
"hash": null,
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"miner": null,
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": null,
"number": "0xb",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "parentHash": "0xa063415a5020f1569fae73ecb0d37bc5649ebe86d59e764a389eb37814bd42cb",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "size": "0x256",
- "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "timestamp": "0x2a",
+ "size": "0x26a",
+ "stateRoot": "0xce0e05397e548614a5b93254662174329466f8f4b1b391eb36fec9a7a591e58e",
+ "timestamp": "0x6e",
"transactions": [
- "0x4afee081df5dff7a025964032871f7d4ba4d21baf5f6376a2f4a9f79fc506298"
+ "0xd773fbb47ec87b1a958ac16430943ddf2797ecae2b33fe7b16ddb334e30325ed"
],
- "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37",
- "uncles": [],
- "withdrawals": [
- {
- "index": "0x0",
- "validatorIndex": "0x1",
- "address": "0x1234000000000000000000000000000000000000",
- "amount": "0xa"
- }
- ],
- "withdrawalsRoot": "0x73d756269cdfc22e7e17a3548e36f42f750ca06d7e3cd98d1b6d0eb5add9dc84"
+ "transactionsRoot": "0x59abb8ec0655f66e66450d1502618bc64022ae2d2950fa471eec6e8da2846264",
+ "uncles": []
}
\ No newline at end of file
diff --git a/internal/ethapi/testdata/eth_getBlockReceipts-tag-pending.json b/internal/ethapi/testdata/eth_getBlockReceipts-tag-pending.json
new file mode 100644
index 0000000000..75f9f3ad99
--- /dev/null
+++ b/internal/ethapi/testdata/eth_getBlockReceipts-tag-pending.json
@@ -0,0 +1,18 @@
+[
+ {
+ "blockHash": "0xc74cf882395ec92eec3673d93a57f9a3bf1a5e696fae3e52f252059af62756c8",
+ "blockNumber": "0x7",
+ "contractAddress": null,
+ "cumulativeGasUsed": "0x5208",
+ "effectiveGasPrice": "0x17b07ddf",
+ "from": "0x703c4b2bd70c169f5717101caee543299fc946c7",
+ "gasUsed": "0x5208",
+ "logs": [],
+ "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x1",
+ "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e",
+ "transactionHash": "0xa7eeffe8111539a8f9725eb4d49e341efa1287d33190300adab220929daa5fac",
+ "transactionIndex": "0x0",
+ "type": "0x0"
+ }
+]
\ No newline at end of file
diff --git a/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json b/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json
index 289ff5fece..e4121824ef 100644
--- a/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json
+++ b/internal/ethapi/testdata/eth_getHeaderByNumber-tag-pending.json
@@ -1,19 +1,19 @@
{
- "difficulty": "0x0",
+ "baseFeePerGas": "0xde56ab3",
+ "difficulty": "0x20000",
"extraData": "0x",
- "gasLimit": "0x0",
- "gasUsed": "0x0",
+ "gasLimit": "0x47e7c4",
+ "gasUsed": "0x5208",
"hash": null,
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"miner": null,
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": null,
"number": "0xb",
- "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+ "parentHash": "0xa063415a5020f1569fae73ecb0d37bc5649ebe86d59e764a389eb37814bd42cb",
+ "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
- "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
- "timestamp": "0x2a",
- "transactionsRoot": "0x98d9f6dd0aa479c0fb448f2627e9f1964aca699fccab8f6e95861547a4699e37",
- "withdrawalsRoot": "0x73d756269cdfc22e7e17a3548e36f42f750ca06d7e3cd98d1b6d0eb5add9dc84"
+ "stateRoot": "0xce0e05397e548614a5b93254662174329466f8f4b1b391eb36fec9a7a591e58e",
+ "timestamp": "0x6e",
+ "transactionsRoot": "0x59abb8ec0655f66e66450d1502618bc64022ae2d2950fa471eec6e8da2846264"
}
\ No newline at end of file
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index 6b094721e4..f80ef6d080 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -70,9 +70,6 @@ type TransactionArgs struct {
// For SetCodeTxType
AuthorizationList []types.SetCodeAuthorization `json:"authorizationList"`
-
- // This configures whether blobs are allowed to be passed.
- blobSidecarAllowed bool
}
// from retrieves the transaction sender address.
@@ -94,9 +91,17 @@ func (args *TransactionArgs) data() []byte {
return nil
}
+// sidecarConfig defines the options for deriving missing fields of transactions.
+type sidecarConfig struct {
+ // This configures whether blobs are allowed to be passed and
+ // the associated sidecar version should be attached.
+ blobSidecarAllowed bool
+ blobSidecarVersion byte
+}
+
// setDefaults fills in default values for unspecified tx fields.
-func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend, skipGasEstimation bool) error {
- if err := args.setBlobTxSidecar(ctx); err != nil {
+func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend, config sidecarConfig) error {
+ if err := args.setBlobTxSidecar(ctx, config); err != nil {
return err
}
if err := args.setFeeDefaults(ctx, b, b.CurrentHeader()); err != nil {
@@ -119,11 +124,10 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend, skipGas
// BlobTx fields
if args.BlobHashes != nil && len(args.BlobHashes) == 0 {
- return errors.New(`need at least 1 blob for a blob transaction`)
+ return errors.New("need at least 1 blob for a blob transaction")
}
- maxBlobs := eip4844.MaxBlobsPerBlock(b.ChainConfig(), b.CurrentHeader().Time)
- if args.BlobHashes != nil && len(args.BlobHashes) > maxBlobs {
- return fmt.Errorf(`too many blobs in transaction (have=%d, max=%d)`, len(args.BlobHashes), maxBlobs)
+ if args.BlobHashes != nil && len(args.BlobHashes) > params.BlobTxMaxBlobs {
+ return fmt.Errorf("too many blobs in transaction (have=%d, max=%d)", len(args.BlobHashes), params.BlobTxMaxBlobs)
}
// create check
@@ -137,36 +141,28 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend, skipGas
}
if args.Gas == nil {
- if skipGasEstimation { // Skip gas usage estimation if a precise gas limit is not critical, e.g., in non-transaction calls.
- gas := hexutil.Uint64(b.RPCGasCap())
- if gas == 0 {
- gas = hexutil.Uint64(math.MaxUint64 / 2)
- }
- args.Gas = &gas
- } else { // Estimate the gas usage otherwise.
- // These fields are immutable during the estimation, safe to
- // pass the pointer directly.
- data := args.data()
- callArgs := TransactionArgs{
- From: args.From,
- To: args.To,
- GasPrice: args.GasPrice,
- MaxFeePerGas: args.MaxFeePerGas,
- MaxPriorityFeePerGas: args.MaxPriorityFeePerGas,
- Value: args.Value,
- Data: (*hexutil.Bytes)(&data),
- AccessList: args.AccessList,
- BlobFeeCap: args.BlobFeeCap,
- BlobHashes: args.BlobHashes,
- }
- latestBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
- estimated, err := DoEstimateGas(ctx, b, callArgs, latestBlockNr, nil, nil, b.RPCGasCap())
- if err != nil {
- return err
- }
- args.Gas = &estimated
- log.Trace("Estimate gas usage automatically", "gas", args.Gas)
+ // These fields are immutable during the estimation, safe to
+ // pass the pointer directly.
+ data := args.data()
+ callArgs := TransactionArgs{
+ From: args.From,
+ To: args.To,
+ GasPrice: args.GasPrice,
+ MaxFeePerGas: args.MaxFeePerGas,
+ MaxPriorityFeePerGas: args.MaxPriorityFeePerGas,
+ Value: args.Value,
+ Data: (*hexutil.Bytes)(&data),
+ AccessList: args.AccessList,
+ BlobFeeCap: args.BlobFeeCap,
+ BlobHashes: args.BlobHashes,
+ }
+ latestBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
+ estimated, err := DoEstimateGas(ctx, b, callArgs, latestBlockNr, nil, nil, b.RPCGasCap())
+ if err != nil {
+ return err
}
+ args.Gas = &estimated
+ log.Trace("Estimated gas usage automatically", "gas", args.Gas)
}
// If chain id is provided, ensure it matches the local chain id. Otherwise, set the local
@@ -283,18 +279,17 @@ func (args *TransactionArgs) setLondonFeeDefaults(ctx context.Context, head *typ
}
// setBlobTxSidecar adds the blob tx
-func (args *TransactionArgs) setBlobTxSidecar(ctx context.Context) error {
+func (args *TransactionArgs) setBlobTxSidecar(ctx context.Context, config sidecarConfig) error {
// No blobs, we're done.
if args.Blobs == nil {
return nil
}
// Passing blobs is not allowed in all contexts, only in specific methods.
- if !args.blobSidecarAllowed {
+ if !config.blobSidecarAllowed {
return errors.New(`"blobs" is not supported for this RPC method`)
}
- n := len(args.Blobs)
// Assume user provides either only blobs (w/o hashes), or
// blobs together with commitments and proofs.
if args.Commitments == nil && args.Proofs != nil {
@@ -303,43 +298,77 @@ func (args *TransactionArgs) setBlobTxSidecar(ctx context.Context) error {
return errors.New(`blob commitments provided while proofs were not`)
}
- // len(blobs) == len(commitments) == len(proofs) == len(hashes)
+ // len(blobs) == len(commitments) == len(hashes)
+ n := len(args.Blobs)
+ if args.BlobHashes != nil && len(args.BlobHashes) != n {
+ return fmt.Errorf("number of blobs and hashes mismatch (have=%d, want=%d)", len(args.BlobHashes), n)
+ }
if args.Commitments != nil && len(args.Commitments) != n {
return fmt.Errorf("number of blobs and commitments mismatch (have=%d, want=%d)", len(args.Commitments), n)
}
- if args.Proofs != nil && len(args.Proofs) != n {
- return fmt.Errorf("number of blobs and proofs mismatch (have=%d, want=%d)", len(args.Proofs), n)
+
+ // if V0: len(blobs) == len(proofs)
+ // if V1: len(blobs) == len(proofs) * 128
+ proofLen := n
+ if config.blobSidecarVersion == types.BlobSidecarVersion1 {
+ proofLen = n * kzg4844.CellProofsPerBlob
}
- if args.BlobHashes != nil && len(args.BlobHashes) != n {
- return fmt.Errorf("number of blobs and hashes mismatch (have=%d, want=%d)", len(args.BlobHashes), n)
+ if args.Proofs != nil && len(args.Proofs) != proofLen {
+ if len(args.Proofs) != n {
+ return fmt.Errorf("number of blobs and proofs mismatch (have=%d, want=%d)", len(args.Proofs), proofLen)
+ }
+ // Unset the commitments and proofs, as they may be submitted in the legacy format
+ log.Debug("Unset legacy commitments and proofs", "blobs", n, "proofs", len(args.Proofs))
+ args.Commitments, args.Proofs = nil, nil
}
+ // Generate commitments and proofs if they are missing, or validate them if they
+ // are provided.
if args.Commitments == nil {
- // Generate commitment and proof.
- commitments := make([]kzg4844.Commitment, n)
- proofs := make([]kzg4844.Proof, n)
+ var (
+ commitments = make([]kzg4844.Commitment, n)
+ proofs = make([]kzg4844.Proof, 0, proofLen)
+ )
for i, b := range args.Blobs {
c, err := kzg4844.BlobToCommitment(&b)
if err != nil {
return fmt.Errorf("blobs[%d]: error computing commitment: %v", i, err)
}
commitments[i] = c
- p, err := kzg4844.ComputeBlobProof(&b, c)
- if err != nil {
- return fmt.Errorf("blobs[%d]: error computing proof: %v", i, err)
+
+ switch config.blobSidecarVersion {
+ case types.BlobSidecarVersion0:
+ p, err := kzg4844.ComputeBlobProof(&b, c)
+ if err != nil {
+ return fmt.Errorf("blobs[%d]: error computing proof: %v", i, err)
+ }
+ proofs = append(proofs, p)
+ case types.BlobSidecarVersion1:
+ ps, err := kzg4844.ComputeCellProofs(&b)
+ if err != nil {
+ return fmt.Errorf("blobs[%d]: error computing proof: %v", i, err)
+ }
+ proofs = append(proofs, ps...)
}
- proofs[i] = p
}
args.Commitments = commitments
args.Proofs = proofs
} else {
- for i, b := range args.Blobs {
- if err := kzg4844.VerifyBlobProof(&b, args.Commitments[i], args.Proofs[i]); err != nil {
+ switch config.blobSidecarVersion {
+ case types.BlobSidecarVersion0:
+ for i, b := range args.Blobs {
+ if err := kzg4844.VerifyBlobProof(&b, args.Commitments[i], args.Proofs[i]); err != nil {
+ return fmt.Errorf("failed to verify blob proof: %v", err)
+ }
+ }
+ case types.BlobSidecarVersion1:
+ if err := kzg4844.VerifyCellProofs(args.Blobs, args.Commitments, args.Proofs); err != nil {
return fmt.Errorf("failed to verify blob proof: %v", err)
}
}
}
+ // Generate blob hashes if they are missing, or validate them if they are provided.
hashes := make([]common.Hash, n)
hasher := sha256.New()
for i, c := range args.Commitments {
@@ -527,8 +556,11 @@ func (args *TransactionArgs) ToTransaction(defaultType int) *types.Transaction {
BlobFeeCap: uint256.MustFromBig((*big.Int)(args.BlobFeeCap)),
}
if args.Blobs != nil {
- // TODO(rjl493456442, marius) support V1
- data.(*types.BlobTx).Sidecar = types.NewBlobTxSidecar(types.BlobSidecarVersion0, args.Blobs, args.Commitments, args.Proofs)
+ version := types.BlobSidecarVersion0
+ if len(args.Proofs) == len(args.Blobs)*kzg4844.CellProofsPerBlob {
+ version = types.BlobSidecarVersion1
+ }
+ data.(*types.BlobTx).Sidecar = types.NewBlobTxSidecar(version, args.Blobs, args.Commitments, args.Proofs)
}
case types.DynamicFeeTxType:
diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go
index 52331ef9e2..3b91fa1f0b 100644
--- a/internal/ethapi/transaction_args_test.go
+++ b/internal/ethapi/transaction_args_test.go
@@ -19,14 +19,16 @@ package ethapi
import (
"context"
"errors"
- "github.com/ethereum/go-ethereum/internal/xproto/rollup/v1"
- "github.com/ethereum/go-ethereum/core/ssv"
+ "github.com/compose-network/specs/compose/proto"
+
"math/big"
"reflect"
"testing"
"time"
+ "github.com/ethereum/go-ethereum/core/ssv"
+
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
@@ -264,21 +266,6 @@ type backendMock struct {
config *params.ChainConfig
}
-func (b *backendMock) SimulateTransaction(ctx context.Context, tx *types.Transaction, blockNrOrHash rpc.BlockNumberOrHash) (*ssv.SSVTraceResult, error) {
- //TODO implement me
- panic("implement me")
-}
-
-func (b *backendMock) HandleSPMessage(ctx context.Context, msg *rollupv1.Message) ([]common.Hash, error) {
- //TODO implement me
- panic("implement me")
-}
-
-func (b *backendMock) GetMailboxAddresses() []common.Address {
- //TODO implement me
- panic("implement me")
-}
-
func newBackendMock() *backendMock {
var cancunTime uint64 = 600
config := ¶ms.ChainConfig{
@@ -313,6 +300,21 @@ func newBackendMock() *backendMock {
}
}
+func (b *backendMock) SimulateTransaction(ctx context.Context, tx *types.Transaction, blockNrOrHash rpc.BlockNumberOrHash) (*ssv.SSVTraceResult, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (b *backendMock) HandleSPMessage(context.Context, *proto.Message) ([]common.Hash, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (b *backendMock) GetMailboxAddresses() []common.Address {
+ //TODO implement me
+ panic("implement me")
+}
+
func (b *backendMock) setFork(fork string) error {
if fork == "legacy" {
b.current.Number = big.NewInt(900)
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index 8ce414c57b..6dd9efd685 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -217,11 +217,6 @@ web3._extend({
call: 'debug_setHead',
params: 1
}),
- new web3._extend.Method({
- name: 'seedHash',
- call: 'debug_seedHash',
- params: 1
- }),
new web3._extend.Method({
name: 'dumpBlock',
call: 'debug_dumpBlock',
@@ -600,6 +595,11 @@ web3._extend({
call: 'eth_getBlockReceipts',
params: 1,
}),
+ new web3._extend.Method({
+ name: 'config',
+ call: 'eth_config',
+ params: 0,
+ })
],
properties: [
new web3._extend.Property({
diff --git a/metrics/cputime_nop.go b/metrics/cputime_nop.go
index 465d88c4d2..a6285ec10a 100644
--- a/metrics/cputime_nop.go
+++ b/metrics/cputime_nop.go
@@ -14,8 +14,8 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-//go:build windows || js
-// +build windows js
+//go:build windows || js || tinygo
+// +build windows js tinygo
package metrics
diff --git a/metrics/cputime_unix.go b/metrics/cputime_unix.go
index a44bf80876..5db38b16a2 100644
--- a/metrics/cputime_unix.go
+++ b/metrics/cputime_unix.go
@@ -14,8 +14,8 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-//go:build !windows && !js && !wasip1
-// +build !windows,!js,!wasip1
+//go:build !windows && !js && !wasip1 && !tinygo
+// +build !windows,!js,!wasip1,!tinygo
package metrics
diff --git a/metrics/gauge.go b/metrics/gauge.go
index 909fca1304..4f93e22487 100644
--- a/metrics/gauge.go
+++ b/metrics/gauge.go
@@ -45,6 +45,7 @@ func (g *Gauge) Update(v int64) {
(*atomic.Int64)(g).Store(v)
}
+// OPStack addition
// TryUpdate updates the gauge if the value is non-nil, converting it to int64.
func (g *Gauge) TryUpdate(v *big.Int) {
if v == nil {
@@ -53,6 +54,15 @@ func (g *Gauge) TryUpdate(v *big.Int) {
(*atomic.Int64)(g).Store(v.Int64())
}
+// OPStack addition
+// TryUpdateUint64 updates the gauge if the value is non-nil, converting it to int64.
+func (g *Gauge) TryUpdateUint64(v *uint64) {
+ if v == nil {
+ return
+ }
+ (*atomic.Int64)(g).Store(int64(*v))
+}
+
// UpdateIfGt updates the gauge's value if v is larger then the current value.
func (g *Gauge) UpdateIfGt(v int64) {
value := (*atomic.Int64)(g)
diff --git a/metrics/runtimehistogram.go b/metrics/runtimehistogram.go
index 53904b2b28..0ab8914602 100644
--- a/metrics/runtimehistogram.go
+++ b/metrics/runtimehistogram.go
@@ -14,7 +14,7 @@ func getOrRegisterRuntimeHistogram(name string, scale float64, r Registry) *runt
// runtimeHistogram wraps a runtime/metrics histogram.
type runtimeHistogram struct {
- v atomic.Value // v is a pointer to a metrics.Float64Histogram
+ v atomic.Pointer[metrics.Float64Histogram]
scaleFactor float64
}
@@ -58,7 +58,7 @@ func (h *runtimeHistogram) Update(int64) {
// Snapshot returns a non-changing copy of the histogram.
func (h *runtimeHistogram) Snapshot() HistogramSnapshot {
- hist := h.v.Load().(*metrics.Float64Histogram)
+ hist := h.v.Load()
return newRuntimeHistogramSnapshot(hist)
}
diff --git a/miner/miner.go b/miner/miner.go
index 5719d04a43..1412c50a44 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -300,7 +300,6 @@ func (miner *Miner) getPending(ctx context.Context) *newPayloadResult {
return cached
}
}
-
var (
timestamp = uint64(time.Now().Unix())
withdrawal types.Withdrawals
diff --git a/miner/miner_optimism_test.go b/miner/miner_optimism_test.go
new file mode 100644
index 0000000000..4be405f159
--- /dev/null
+++ b/miner/miner_optimism_test.go
@@ -0,0 +1,152 @@
+package miner
+
+import (
+ "encoding/binary"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/beacon"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/consensus/misc/eip1559"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/stretchr/testify/require"
+)
+
+const testDAFootprintGasScalar = 400
+
+// TestDAFootprintMining tests that the miner correctly limits the DA footprint of the block.
+// It builds a block via the miner from txpool
+// transactions and then imports the block into the chain, asserting that
+// execution succeeds.
+func TestDAFootprintMining(t *testing.T) {
+ requirePreJovianBehavior := func(t *testing.T, block *types.Block, receipts []*types.Receipt) {
+ var txGas uint64
+ for _, receipt := range receipts {
+ txGas += receipt.GasUsed
+ }
+ require.Equal(t, txGas, block.GasUsed(), "total tx gas used should be equal to block gas used")
+ require.Zero(t, *block.Header().BlobGasUsed, "expected 0 blob gas used")
+ }
+
+ requireLargeDAFootprintBehavior := func(t *testing.T, block *types.Block, receipts []*types.Receipt) {
+ var (
+ txGas uint64
+ daFootprint uint64
+ txs = block.Transactions()
+ )
+
+ require.Equal(t, len(receipts), len(txs))
+
+ for i, receipt := range receipts {
+ txGas += receipt.GasUsed
+ if txs[i].IsDepositTx() {
+ continue
+ }
+ daFootprint += txs[i].RollupCostData().EstimatedDASize().Uint64() * testDAFootprintGasScalar
+ }
+ require.Equal(t, txGas, block.GasUsed(), "total tx gas used should be equal to block gas used")
+ require.Greater(t, daFootprint, block.GasUsed(), "total DA footprint used should be greater than block gas used")
+ require.LessOrEqual(t, daFootprint, block.GasLimit(), "total DA footprint used should be less or equal block gas limit")
+ }
+ t.Run("jovian-one-min-tx", func(t *testing.T) {
+ testMineAndExecute(t, 0, jovianConfig(), func(t *testing.T, _ *core.BlockChain, block *types.Block, receipts []*types.Receipt) {
+ require.Len(t, receipts, 2) // 1 test pending tx and 1 deposit tx
+ requireLargeDAFootprintBehavior(t, block, receipts)
+
+ // Double-confirm DA footprint calculation manually in this simple transaction case.
+ daFootprint, err := types.CalcDAFootprint(block.Transactions())
+ require.NoError(t, err, "failed to calculate DA footprint")
+ require.Equal(t, daFootprint, *block.Header().BlobGasUsed,
+ "header blob gas used should match calculated DA footprint")
+ require.Equal(t, testDAFootprintGasScalar*types.MinTransactionSize.Uint64(), daFootprint,
+ "simple pending transaction should lead to min DA footprint")
+ })
+ })
+ t.Run("jovian-at-limit", func(t *testing.T) {
+ testMineAndExecute(t, 17, jovianConfig(), func(t *testing.T, _ *core.BlockChain, block *types.Block, receipts []*types.Receipt) {
+ require.Len(t, receipts, 19) // including 1 test pending tx and 1 deposit tx
+ requireLargeDAFootprintBehavior(t, block, receipts)
+ })
+ })
+ t.Run("jovian-above-limit", func(t *testing.T) {
+ testMineAndExecute(t, 18, jovianConfig(), func(t *testing.T, _ *core.BlockChain, block *types.Block, receipts []*types.Receipt) {
+ require.Len(t, receipts, 19) // same as for 17, because 18th tx from pool shouldn't have been included
+ requireLargeDAFootprintBehavior(t, block, receipts)
+ })
+ })
+ t.Run("isthmus", func(t *testing.T) {
+ testMineAndExecute(t, 39, isthmusConfig(), func(t *testing.T, _ *core.BlockChain, block *types.Block, receipts []*types.Receipt) {
+ require.Len(t, receipts, 41) // including 1 test pending tx and 1 deposit tx
+ requirePreJovianBehavior(t, block, receipts)
+ })
+ })
+
+ t.Run("jovian-invalid-blobGasUsed", func(t *testing.T) {
+ testMineAndExecute(t, 0, jovianConfig(), func(t *testing.T, bc *core.BlockChain, block *types.Block, receipts []*types.Receipt) {
+ require.Len(t, receipts, 2) // 1 test pending tx and 1 deposit tx
+ header := block.Header()
+ *header.BlobGasUsed += 1 // invalidate blobGasUsed
+ invalidBlock := block.WithSeal(header)
+ _, err := bc.InsertChain(types.Blocks{invalidBlock})
+ require.ErrorContains(t, err, "invalid DA footprint in blobGasUsed field (remote: 40001 local: 40000)")
+ })
+ })
+}
+
+func testMineAndExecute(t *testing.T, numTxs uint64, cfg *params.ChainConfig, assertFn func(*testing.T, *core.BlockChain, *types.Block, []*types.Receipt)) {
+ db := rawdb.NewMemoryDatabase()
+ w, b := newTestWorker(t, cfg, beacon.New(ethash.NewFaker()), db, 0)
+
+ // Start from nonce 1 to avoid colliding with the preloaded pending tx.
+ txs := genTxs(1, numTxs)
+
+ // Add to txpool for the miner to pick up.
+ if errs := b.txPool.Add(txs, false); len(errs) > 0 {
+ for _, err := range errs {
+ require.NoError(t, err, "failed adding tx to pool")
+ }
+ }
+
+ parent := b.chain.CurrentBlock()
+ ts := parent.Time + 12
+ dtx := new(types.DepositTx)
+ if cfg.IsDAFootprintBlockLimit(parent.Time) {
+ dtx = jovianDepositTx(testDAFootprintGasScalar)
+ }
+
+ genParams := &generateParams{
+ parentHash: b.chain.CurrentBlock().Hash(),
+ timestamp: ts,
+ withdrawals: types.Withdrawals{},
+ beaconRoot: new(common.Hash),
+ gasLimit: ptr(uint64(1e6)), // Small gas limit to easily fill block
+ txs: types.Transactions{types.NewTx(dtx)},
+ eip1559Params: eip1559.EncodeHolocene1559Params(250, 6),
+ }
+ if cfg.IsMinBaseFee(ts) {
+ genParams.minBaseFee = new(uint64)
+ }
+ r := w.generateWork(genParams, false)
+ require.NoError(t, r.err, "block generation failed")
+ require.NotNil(t, r.block, "no block generated")
+
+ assertFn(t, b.chain, r.block, r.receipts)
+
+ // Import the block into the chain, which executes it via StateProcessor.
+ _, err := b.chain.InsertChain(types.Blocks{r.block})
+ require.NoError(t, err, "block import/execution failed")
+}
+
+func jovianDepositTx(daFootprintGasScalar uint16) *types.DepositTx {
+ data := make([]byte, types.JovianL1AttributesLen)
+ copy(data[0:4], types.JovianL1AttributesSelector)
+ binary.BigEndian.PutUint16(data[types.JovianL1AttributesLen-2:types.JovianL1AttributesLen], daFootprintGasScalar)
+ return &types.DepositTx{Data: data}
+}
+
+func ptr[T any](v T) *T {
+ return &v
+}
diff --git a/miner/miner_test.go b/miner/miner_test.go
index a0628ecb57..5a691d5291 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -196,7 +196,7 @@ func createMiner(t *testing.T) *Miner {
// Create Miner
backend := NewMockBackend(bc, txpool, false, nil)
- miner := New(backend, config, engine)
+ miner := New(backend, struct{}{}, config, engine)
return miner
}
diff --git a/miner/op_interop_miner_test.go b/miner/op_interop_miner_test.go
index 1a595d87f5..101a9a0efe 100644
--- a/miner/op_interop_miner_test.go
+++ b/miner/op_interop_miner_test.go
@@ -67,7 +67,7 @@ func createInteropMiner(t *testing.T, supervisorInFailsafe bool, queryFailsafeCb
// Create mock backend with interop support
backend := NewMockBackend(bc, txpool, supervisorInFailsafe, queryFailsafeCb)
- miner := New(backend, config, engine)
+ miner := New(backend, struct{}{}, config, engine)
return miner, testBankKey, testBankAddress
}
diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go
index 2867f73bec..6c9dc13826 100644
--- a/miner/payload_building_test.go
+++ b/miner/payload_building_test.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
@@ -74,7 +75,10 @@ const (
numDAFilterTxs = 256
)
-var zero = uint64(0)
+var (
+ zero = uint64(0)
+ validEIP1559Params = eip1559.EncodeHolocene1559Params(250, 6)
+)
func init() {
testTxPoolConfig = legacypool.DefaultConfig
@@ -118,7 +122,7 @@ type testWorkerBackend struct {
}
func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, n int) *testWorkerBackend {
- var gspec = &core.Genesis{
+ gspec := &core.Genesis{
Config: chainConfig,
Alloc: types.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
}
@@ -127,7 +131,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
gspec.ExtraData = make([]byte, 32+common.AddressLength+crypto.SignatureLength)
copy(gspec.ExtraData[32:32+common.AddressLength], testBankAddress.Bytes())
e.Authorize(testBankAddress)
- case *ethash.Ethash:
+ case *ethash.Ethash, *beacon.Beacon:
default:
t.Fatalf("unexpected consensus engine type: %T", engine)
}
@@ -156,7 +160,7 @@ func (b *testWorkerBackend) TxPool() *txpool.TxPool { return b.txPool }
func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*Miner, *testWorkerBackend) {
backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks)
backend.txPool.Add(pendingTxs, true)
- w := New(backend, testConfig, engine)
+ w := New(backend, struct{}{}, testConfig, engine)
return w, backend
}
@@ -166,23 +170,42 @@ func TestBuildPayload(t *testing.T) {
// the builder routine
t.Run("with-tx-pool", func(t *testing.T) { testBuildPayload(t, false, false, nil, params.TestChainConfig) })
t.Run("with-tx-pool-interrupt", func(t *testing.T) { testBuildPayload(t, false, true, nil, params.TestChainConfig) })
- params1559 := []byte{0, 1, 2, 3, 4, 5, 6, 7}
- t.Run("with-params-holocene", func(t *testing.T) { testBuildPayload(t, false, false, params1559, holoceneConfig()) })
- t.Run("with-params-no-tx-pool-holocene", func(t *testing.T) { testBuildPayload(t, true, false, params1559, holoceneConfig()) })
- t.Run("with-params-interrupt-holocene", func(t *testing.T) { testBuildPayload(t, false, true, params1559, holoceneConfig()) })
- t.Run("with-params-jovian", func(t *testing.T) { testBuildPayload(t, false, false, params1559, jovianConfig()) })
- t.Run("with-params-no-tx-pool-jovian", func(t *testing.T) { testBuildPayload(t, true, false, params1559, jovianConfig()) })
- t.Run("with-params-interrupt-jovian", func(t *testing.T) { testBuildPayload(t, false, true, params1559, jovianConfig()) })
-
- t.Run("wrong-config-no-params", func(t *testing.T) { testBuildPayloadWrongConfig(t, nil, holoceneConfig()) })
- t.Run("wrong-config-params-holocene", func(t *testing.T) { testBuildPayloadWrongConfig(t, params1559, holoceneConfig()) })
- t.Run("wrong-config-params-jovian", func(t *testing.T) { testBuildPayloadWrongConfig(t, params1559, jovianConfig()) })
+ t.Run("with-params-holocene", func(t *testing.T) { testBuildPayload(t, false, false, validEIP1559Params, holoceneConfig()) })
+ t.Run("with-params-no-tx-pool-holocene", func(t *testing.T) { testBuildPayload(t, true, false, validEIP1559Params, holoceneConfig()) })
+ t.Run("with-params-interrupt-holocene", func(t *testing.T) { testBuildPayload(t, false, true, validEIP1559Params, holoceneConfig()) })
+ t.Run("with-params-jovian", func(t *testing.T) { testBuildPayload(t, false, false, validEIP1559Params, jovianConfig()) })
+ t.Run("with-params-no-tx-pool-jovian", func(t *testing.T) { testBuildPayload(t, true, false, validEIP1559Params, jovianConfig()) })
+ t.Run("with-params-interrupt-jovian", func(t *testing.T) { testBuildPayload(t, false, true, validEIP1559Params, jovianConfig()) })
zeroParams := make([]byte, 8)
t.Run("with-zero-params-holocene", func(t *testing.T) { testBuildPayload(t, true, false, zeroParams, holoceneConfig()) })
t.Run("with-zero-params-jovian", func(t *testing.T) { testBuildPayload(t, true, false, zeroParams, jovianConfig()) })
}
+func TestBuildPayloadError(t *testing.T) {
+ t.Run("pre-holocene-with-params", func(t *testing.T) {
+ cfg := holoceneConfig()
+ cfg.HoloceneTime = nil
+ testBuildPayloadError(t, cfg,
+ "got eip1559 params, expected none",
+ func(args *BuildPayloadArgs) { args.EIP1559Params = validEIP1559Params })
+ })
+ t.Run("holocene-no-params", func(t *testing.T) {
+ testBuildPayloadError(t, holoceneConfig(),
+ "holocene eip-1559 params should be 8 bytes, got 0",
+ func(args *BuildPayloadArgs) { args.EIP1559Params = nil })
+ })
+ t.Run("holocene-bad-params", func(t *testing.T) {
+ testBuildPayloadError(t, holoceneConfig(),
+ "holocene params cannot have a 0 denominator unless elasticity is also 0",
+ func(args *BuildPayloadArgs) { args.EIP1559Params = eip1559.EncodeHolocene1559Params(0, 6) })
+ })
+ t.Run("jovian-no-minbasefee", func(t *testing.T) {
+ testBuildPayloadError(t, jovianConfig(), "missing minBaseFee",
+ func(args *BuildPayloadArgs) { args.MinBaseFee = nil })
+ })
+}
+
func TestDAFilters(t *testing.T) {
// Each test case inserts one pending small (DA cost 100) transaction followed by
// numDAFilterTxs transactions that have random calldata (min DA size >> 100)
@@ -203,50 +226,65 @@ func TestDAFilters(t *testing.T) {
}
func holoceneConfig() *params.ChainConfig {
- config := *params.TestChainConfig
- config.LondonBlock = big.NewInt(0)
- t := uint64(0)
- config.CanyonTime = &t
- config.HoloceneTime = &t
- canyonDenom := uint64(250)
- config.Optimism = ¶ms.OptimismConfig{
- EIP1559Elasticity: 6,
- EIP1559Denominator: 50,
- EIP1559DenominatorCanyon: &canyonDenom,
- }
+ config := *params.OptimismTestConfig
+ config.IsthmusTime = nil
+ config.JovianTime = nil
+ config.PragueTime = nil
+ config.OsakaTime = nil
return &config
}
-func jovianConfig() *params.ChainConfig {
+func isthmusConfig() *params.ChainConfig {
config := holoceneConfig()
- zero := uint64(0)
+ config.IsthmusTime = &zero
+ config.PragueTime = &zero
+ return config
+}
+
+func jovianConfig() *params.ChainConfig {
+ config := isthmusConfig()
config.JovianTime = &zero
return config
}
-// newPayloadArgs returns a BuildPaylooadArgs with the given parentHash, eip-1559 params,
-// minBaseFee, testTimestamp for Timestamp, and testRecipient for recipient. NoTxPool is set to true.
-func newPayloadArgs(parentHash common.Hash, params1559 []byte, minBaseFee *uint64) *BuildPayloadArgs {
- return &BuildPayloadArgs{
- Parent: parentHash,
- Timestamp: testTimestamp,
- Random: common.Hash{},
- FeeRecipient: testRecipient,
- NoTxPool: true,
- EIP1559Params: params1559,
- MinBaseFee: minBaseFee,
+// newPayloadArgs returns valid BuildPayloadArgs for the given chain config with the given parentHash,
+// testTimestamp for Timestamp, and testRecipient for recipient.
+// OP-Stack chains will have one dummy deposit transaction in Transactions.
+// NoTxPool is set to true.
+// A test can modify individual fields afterwards to enable the transaction
+// pool, create invalid eip-1559 params, minBaseFee, etc.
+func newPayloadArgs(parentHash common.Hash, cfg *params.ChainConfig) *BuildPayloadArgs {
+ args := &BuildPayloadArgs{
+ Parent: parentHash,
+ Timestamp: testTimestamp,
+ FeeRecipient: testRecipient,
+ Withdrawals: types.Withdrawals{},
+ NoTxPool: true,
+ }
+
+ if !cfg.IsOptimism() {
+ return args
+ }
+
+ if cfg.IsHolocene(args.Timestamp) {
+ args.EIP1559Params = validEIP1559Params
+ }
+ dtx := new(types.DepositTx)
+ if cfg.IsDAFootprintBlockLimit(args.Timestamp) {
+ dtx = jovianDepositTx(testDAFootprintGasScalar)
}
+ args.Transactions = []*types.Transaction{types.NewTx(dtx)}
+ if cfg.IsMinBaseFee(args.Timestamp) {
+ args.MinBaseFee = ptr(uint64(1e9))
+ }
+
+ return args
}
func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte, config *params.ChainConfig) {
t.Parallel()
db := rawdb.NewMemoryDatabase()
- var minBaseFee *uint64
- if config.IsOptimismJovian(testTimestamp) {
- val := uint64(1e9)
- minBaseFee = &val
- }
w, b := newTestWorker(t, config, ethash.NewFaker(), db, 0)
const numInterruptTxs = 256
@@ -258,8 +296,9 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte,
b.txPool.Add(txs, false)
}
- args := newPayloadArgs(b.chain.CurrentBlock().Hash(), params1559, minBaseFee)
+ args := newPayloadArgs(b.chain.CurrentBlock().Hash(), config)
args.NoTxPool = noTxPool
+ args.EIP1559Params = params1559
// payload resolution now interrupts block building, so we have to
// wait for the payloading building process to build its first block
@@ -269,6 +308,9 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte,
}
verify := func(outer *engine.ExecutionPayloadEnvelope, txs int) {
t.Helper()
+ if config.IsOptimism() {
+ txs++ // account for dummy deposit tx
+ }
if outer == nil {
t.Fatal("ExecutionPayloadEnvelope is nil")
}
@@ -301,8 +343,8 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte,
var expected []byte
if len(params1559) != 0 {
versionByte := eip1559.HoloceneExtraDataVersionByte
- if config.IsOptimismJovian(testTimestamp) {
- versionByte = eip1559.JovianExtraDataVersionByte
+ if config.IsMinBaseFee(testTimestamp) {
+ versionByte = eip1559.MinBaseFeeExtraDataVersionByte
}
expected = []byte{versionByte}
@@ -312,9 +354,9 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte,
} else {
expected = append(expected, params1559...)
}
- if versionByte == eip1559.JovianExtraDataVersionByte {
+ if versionByte == eip1559.MinBaseFeeExtraDataVersionByte {
buf := make([]byte, 8)
- binary.BigEndian.PutUint64(buf, *minBaseFee)
+ binary.BigEndian.PutUint64(buf, *args.MinBaseFee)
expected = append(expected, buf...)
}
}
@@ -341,7 +383,7 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte,
if e != uint64(expectedElasticity) {
t.Fatalf("elasticity doesn't match. want: %d, got %d", expectedElasticity, e)
}
- require.Equal(t, minBaseFee, extractedMinBaseFee, "minBaseFee doesn't match")
+ require.Equal(t, args.MinBaseFee, extractedMinBaseFee, "minBaseFee doesn't match")
}
if noTxPool {
@@ -377,8 +419,7 @@ func testDAFilters(t *testing.T, maxDATxSize, maxDABlockSize *big.Int, expectedT
txs := genTxs(1, numDAFilterTxs)
b.txPool.Add(txs, false)
- params1559 := []byte{0, 1, 2, 3, 4, 5, 6, 7}
- args := newPayloadArgs(b.chain.CurrentBlock().Hash(), params1559, &zero)
+ args := newPayloadArgs(b.chain.CurrentBlock().Hash(), config)
args.NoTxPool = false
payload, err := w.buildPayload(args, false)
@@ -387,65 +428,25 @@ func testDAFilters(t *testing.T, maxDATxSize, maxDABlockSize *big.Int, expectedT
}
payload.WaitFull()
result := payload.ResolveFull().ExecutionPayload
- if len(result.Transactions) != expectedTxCount {
+ if len(result.Transactions) != expectedTxCount+1 { // account for dummy deposit tx
t.Fatalf("Unexpected transaction set: got %d, expected %d", len(result.Transactions), expectedTxCount)
}
}
-func testBuildPayloadWrongConfig(t *testing.T, params1559 []byte, config *params.ChainConfig) {
+func testBuildPayloadError(t *testing.T, config *params.ChainConfig, expErrStr string, mod func(*BuildPayloadArgs)) {
t.Parallel()
db := rawdb.NewMemoryDatabase()
- wrongConfig := *config
- if len(params1559) != 0 {
- // deactivate holocene and jovian and make sure non-empty params get rejected
- wrongConfig.HoloceneTime = nil
- wrongConfig.JovianTime = nil
- }
- w, b := newTestWorker(t, &wrongConfig, ethash.NewFaker(), db, 0)
-
- args := newPayloadArgs(b.chain.CurrentBlock().Hash(), params1559, &zero)
- payload, err := w.buildPayload(args, false)
- if err == nil && (payload == nil || payload.err == nil) {
- t.Fatalf("expected error, got none")
- }
-}
-
-func TestBuildPayloadInvalidHoloceneParams(t *testing.T) {
- t.Parallel()
- db := rawdb.NewMemoryDatabase()
- config := holoceneConfig()
- w, b := newTestWorker(t, config, ethash.NewFaker(), db, 0)
-
- // 0 denominators shouldn't be allowed
- badParams := eip1559.EncodeHolocene1559Params(0, 6)
-
- args := newPayloadArgs(b.chain.CurrentBlock().Hash(), badParams, &zero)
- payload, err := w.buildPayload(args, false)
- if err == nil && (payload == nil || payload.err == nil) {
- t.Fatalf("expected error, got none")
- }
-}
-
-func TestBuildPayloadInvalidJovianExtraData(t *testing.T) {
- t.Parallel()
- db := rawdb.NewMemoryDatabase()
- config := jovianConfig()
w, b := newTestWorker(t, config, ethash.NewFaker(), db, 0)
- // 0 denominators shouldn't be allowed
- badParams := eip1559.EncodeJovianExtraData(0, 6, 0)
-
- args := newPayloadArgs(b.chain.CurrentBlock().Hash(), badParams, &zero)
+ args := newPayloadArgs(b.chain.CurrentBlock().Hash(), config)
+ mod(args)
payload, err := w.buildPayload(args, false)
- if err == nil && (payload == nil || payload.err == nil) {
- t.Fatalf("expected error, got none")
- }
-
- // missing minBaseFee shouldn't be allowed (use Holocene encoder)
- badParams = eip1559.EncodeHoloceneExtraData(250, 6)
- args = newPayloadArgs(b.chain.CurrentBlock().Hash(), badParams, &zero)
- payload, err = w.buildPayload(args, false)
- if err == nil && (payload == nil || payload.err == nil) {
+ require.Nil(t, payload)
+ if err != nil {
+ require.ErrorContains(t, err, expErrStr)
+ } else if payload.err != nil {
+ require.ErrorContains(t, payload.err, expErrStr)
+ } else {
t.Fatalf("expected error, got none")
}
}
@@ -466,7 +467,7 @@ func genTxs(startNonce, count uint64) types.Transactions {
Nonce: nonce,
To: &testUserAddress,
Value: big.NewInt(1000),
- Gas: params.TxGas + uint64(len(randomBytes))*16,
+ Gas: params.TxGas + uint64(len(randomBytes))*40,
GasPrice: big.NewInt(params.InitialBaseFee),
Data: randomBytes,
})
diff --git a/miner/worker.go b/miner/worker.go
index f335abcec3..84e085d709 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -74,6 +74,9 @@ type environment struct {
coinbase common.Address
evm *vm.EVM
+ // OP-Stack addition: DA footprint block limit
+ daFootprintGasScalar uint16
+
header *types.Header
txs []*types.Transaction
receipts []*types.Receipt
@@ -346,10 +349,13 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir
}
if genParams.gasLimit != nil { // override gas limit if specified
header.GasLimit = *genParams.gasLimit
- } else if miner.chain.Config().Optimism != nil && miner.config.GasCeil != 0 {
+ } else if miner.chain.Config().IsOptimism() && miner.config.GasCeil != 0 {
// configure the gas limit of pending blocks with the miner gas limit config when using optimism
header.GasLimit = miner.config.GasCeil
}
+ if miner.chainConfig.IsMinBaseFee(header.Time) && genParams.minBaseFee == nil {
+ return nil, errors.New("missing minBaseFee")
+ }
if cfg := miner.chainConfig; cfg.IsHolocene(header.Time) {
if err := eip1559.ValidateHolocene1559Params(genParams.eip1559Params); err != nil {
return nil, err
@@ -390,6 +396,15 @@ func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*envir
return nil, err
}
env.noTxs = genParams.noTxs
+ if miner.chainConfig.IsDAFootprintBlockLimit(parent.Time) {
+ if len(genParams.txs) == 0 || !genParams.txs[0].IsDepositTx() {
+ return nil, errors.New("missing L1 attributes deposit transaction")
+ }
+ env.daFootprintGasScalar, err = types.ExtractDAFootprintGasScalar(genParams.txs[0].Data())
+ if err != nil {
+ return nil, err
+ }
+ }
if header.ParentBeaconRoot != nil {
core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, env.evm)
}
@@ -437,7 +452,7 @@ func (miner *Miner) makeEnv(
if err != nil {
return nil, err
}
- state.StartPrefetcher("miner", bundle)
+ state.StartPrefetcher("miner", bundle, nil)
}
// Note the passed coinbase may be different with header.Coinbase.
return &environment{
@@ -549,20 +564,20 @@ func (miner *Miner) applyTransaction(env *environment, tx *types.Transaction) (*
return receipt, err
}
-func (miner *Miner) commitTransactions(
- env *environment,
- plainTxs, blobTxs *transactionsByPriceAndNonce,
- interrupt *atomic.Int32,
-) error {
+func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *transactionsByPriceAndNonce, interrupt *atomic.Int32) error {
var (
- isOsaka = miner.chainConfig.IsOsaka(env.header.Number, env.header.Time)
isCancun = miner.chainConfig.IsCancun(env.header.Number, env.header.Time)
gasLimit = env.header.GasLimit
)
if env.gasPool == nil {
env.gasPool = new(core.GasPool).AddGas(gasLimit)
}
+
+ // OP-Stack additions: throttling and DA footprint limit
blockDABytes := new(big.Int)
+ isJovian := miner.chainConfig.IsDAFootprintBlockLimit(env.header.Time)
+ minTransactionDAFootprint := types.MinTransactionSize.Uint64() * uint64(env.daFootprintGasScalar)
+
for {
// Check interruption signal and abort building if it's fired.
if interrupt != nil {
@@ -575,6 +590,17 @@ func (miner *Miner) commitTransactions(
log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas)
break
}
+
+ var daFootprintLeft uint64
+ if isJovian {
+ daFootprintLeft = gasLimit - *env.header.BlobGasUsed
+ // If we don't have enough DA space for any further transactions then we're done.
+ if daFootprintLeft < minTransactionDAFootprint {
+ log.Debug("Not enough DA space for further transactions", "have", daFootprintLeft, "want", minTransactionDAFootprint)
+ break
+ }
+ }
+
// If we don't have enough blob space for any further blob transactions,
// skip that list altogether
if !blobTxs.Empty() && env.blobs >= eip4844.MaxBlobsPerBlock(miner.chainConfig, env.header.Time) {
@@ -640,6 +666,19 @@ func (miner *Miner) commitTransactions(
}
}
+ // OP-Stack addition: Jovian DA footprint limit
+ var txDAFootprint uint64
+ // Note that commitTransaction is only called after deposit transactions have already been committed,
+ // so we don't need to resolve the transaction here and exclude deposits.
+ if isJovian {
+ txDAFootprint = ltx.DABytes.Uint64() * uint64(env.daFootprintGasScalar)
+ if daFootprintLeft < txDAFootprint {
+ log.Debug("Not enough DA space left for transaction", "hash", ltx.Hash, "left", daFootprintLeft, "needed", txDAFootprint)
+ txs.Pop()
+ continue
+ }
+ }
+
// OP-Stack addition: sequencer throttling
daBytesAfter := new(big.Int)
if ltx.DABytes != nil && miner.config.MaxDABlockSize != nil {
@@ -680,21 +719,6 @@ func (miner *Miner) commitTransactions(
if !env.txFitsSize(tx) {
break
}
-
- // Make sure all transactions after osaka have cell proofs
- if isOsaka {
- if sidecar := tx.BlobTxSidecar(); sidecar != nil {
- if sidecar.Version == types.BlobSidecarVersion0 {
- log.Info("Including blob tx with v0 sidecar, recomputing proofs", "hash", ltx.Hash)
- if err := sidecar.ToV1(); err != nil {
- txs.Pop()
- log.Warn("Failed to recompute cell proofs", "hash", ltx.Hash, "err", err)
- continue
- }
- }
- }
- }
-
// Error may be ignored here. The error has already been checked
// during transaction acceptance in the transaction pool.
from, _ := types.Sender(env.signer, tx)
@@ -750,6 +774,9 @@ func (miner *Miner) commitTransactions(
case errors.Is(err, nil):
// Everything ok, collect the logs and shift in the next transaction from the same account
blockDABytes = daBytesAfter
+ if isJovian {
+ *env.header.BlobGasUsed += txDAFootprint
+ }
txs.Shift()
default:
@@ -1110,7 +1137,6 @@ func totalFees(block *types.Block, receipts []*types.Receipt) *big.Int {
for i, tx := range block.Transactions() {
minerFee, _ := tx.EffectiveGasTip(block.BaseFee())
feesWei.Add(feesWei, new(big.Int).Mul(new(big.Int).SetUint64(receipts[i].GasUsed), minerFee))
- // TODO (MariusVanDerWijden) add blob fees
}
return feesWei
}
diff --git a/node/api.go b/node/api.go
index 33dfb3a1cc..e5dda5ac4d 100644
--- a/node/api.go
+++ b/node/api.go
@@ -191,7 +191,7 @@ func (api *adminAPI) StartHTTP(host *string, port *int, cors *string, apis *stri
}
if vhosts != nil {
config.Vhosts = nil
- for _, vhost := range strings.Split(*host, ",") {
+ for _, vhost := range strings.Split(*vhosts, ",") {
config.Vhosts = append(config.Vhosts, strings.TrimSpace(vhost))
}
}
diff --git a/node/errors.go b/node/errors.go
index 67547bf691..f9188f8d99 100644
--- a/node/errors.go
+++ b/node/errors.go
@@ -24,10 +24,9 @@ import (
)
var (
- ErrDatadirUsed = errors.New("datadir already used by another process")
- ErrNodeStopped = errors.New("node not started")
- ErrNodeRunning = errors.New("node already running")
- ErrServiceUnknown = errors.New("unknown service")
+ ErrDatadirUsed = errors.New("datadir already used by another process")
+ ErrNodeStopped = errors.New("node not started")
+ ErrNodeRunning = errors.New("node already running")
datadirInUseErrnos = map[uint]bool{11: true, 32: true, 35: true}
)
diff --git a/node/node.go b/node/node.go
index 360e312735..dc23da59d7 100644
--- a/node/node.go
+++ b/node/node.go
@@ -1042,8 +1042,8 @@ func (n *Node) EventMux() *event.TypeMux {
return n.eventmux
}
-// OpenDatabase opens an existing database with the given name (or creates one if no
-// previous can be found) from within the node's instanceproto directory. If the node has no
+// OpenDatabaseWithOptions opens an existing database with the given name (or creates one if no
+// previous can be found) from within the node's instance directory. If the node has no
// data directory, an in-memory database is returned.
func (n *Node) OpenDatabaseWithOptions(name string, opt DatabaseOptions) (ethdb.Database, error) {
n.lock.Lock()
@@ -1073,7 +1073,7 @@ func (n *Node) OpenDatabaseWithOptions(name string, opt DatabaseOptions) (ethdb.
}
// OpenDatabase opens an existing database with the given name (or creates one if no
-// previous can be found) from within the node's instanceproto directory.
+// previous can be found) from within the node's instance directory.
// If the node has no data directory, an in-memory database is returned.
// Deprecated: use OpenDatabaseWithOptions instead.
func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, readonly bool) (ethdb.Database, error) {
diff --git a/p2p/config.go b/p2p/config.go
index 68a9c0bb5f..17607a1f88 100644
--- a/p2p/config.go
+++ b/p2p/config.go
@@ -35,8 +35,7 @@ type Config struct {
// This field must be set to a valid secp256k1 private key.
PrivateKey *ecdsa.PrivateKey `toml:"-"`
- // MaxPeers is the maximum number of peers that can be
- // connected. It must be greater than zero.
+ // MaxPeers is the maximum number of peers that can be connected.
MaxPeers int
// MaxPendingPeers is the maximum number of peers that can be pending in the
diff --git a/p2p/discover/v5_udp_test.go b/p2p/discover/v5_udp_test.go
index 3a384aab12..6abe20d7a4 100644
--- a/p2p/discover/v5_udp_test.go
+++ b/p2p/discover/v5_udp_test.go
@@ -378,9 +378,93 @@ func TestUDPv5_findnodeCall(t *testing.T) {
if !reflect.DeepEqual(response, nodes) {
t.Fatalf("wrong nodes in response")
}
+}
+
+// BadIdentityScheme mocks an identity scheme not supported by the test node.
+type BadIdentityScheme struct{}
+
+func (s BadIdentityScheme) Verify(r *enr.Record, sig []byte) error { return nil }
+func (s BadIdentityScheme) NodeAddr(r *enr.Record) []byte {
+ var id enode.ID
+ r.Load(enr.WithEntry("badaddr", &id))
+ return id[:]
+}
+
+// This test covers invalid NODES responses for the FINDNODE call in a single table-driven test.
+func TestUDPv5_findnodeCall_InvalidNodes(t *testing.T) {
+ t.Parallel()
+ test := newUDPV5Test(t)
+ defer test.close()
- // TODO: check invalid IPs
- // TODO: check invalid/unsigned record
+ for i, tt := range []struct {
+ name string
+ ip enr.Entry
+ port enr.Entry
+ sign func(r *enr.Record, id enode.ID) *enode.Node
+ }{
+ {
+ name: "invalid ip (unspecified 0.0.0.0)",
+ ip: enr.IP(net.IPv4zero),
+ },
+ {
+ name: "invalid udp port (<=1024)",
+ port: enr.UDP(1024),
+ },
+ {
+ name: "invalid record, no signature",
+ sign: func(r *enr.Record, id enode.ID) *enode.Node {
+ r.Set(enr.ID("bad"))
+ r.Set(enr.WithEntry("badaddr", id))
+ r.SetSig(BadIdentityScheme{}, []byte{})
+ n, _ := enode.New(BadIdentityScheme{}, r)
+ return n
+ },
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ // Build ENR node for test.
+ var (
+ distance = 230
+ remote = test.getNode(test.remotekey, test.remoteaddr).Node()
+ id = idAtDistance(remote.ID(), distance)
+ r enr.Record
+ )
+ r.Set(enr.IP(intIP(i)))
+ if tt.ip != nil {
+ r.Set(tt.ip)
+ }
+ r.Set(enr.UDP(30303))
+ if tt.port != nil {
+ r.Set(tt.port)
+ }
+ r = *enode.SignNull(&r, id).Record()
+ if tt.sign != nil {
+ r = *tt.sign(&r, id).Record()
+ }
+
+ // Launch findnode request.
+ var (
+ done = make(chan error, 1)
+ got []*enode.Node
+ )
+ go func() {
+ var err error
+ got, err = test.udp.Findnode(remote, []uint{uint(distance)})
+ done <- err
+ }()
+
+ // Handle request.
+ test.waitPacketOut(func(p *v5wire.Findnode, _ netip.AddrPort, _ v5wire.Nonce) {
+ test.packetIn(&v5wire.Nodes{ReqID: p.ReqID, RespCount: 1, Nodes: []*enr.Record{&r}})
+ })
+ if err := <-done; err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if len(got) != 0 {
+ t.Fatalf("expected 0 nodes, got %d", len(got))
+ }
+ })
+ }
}
// This test checks that pending calls are re-sent when a handshake happens.
diff --git a/p2p/discover/v5wire/encoding.go b/p2p/discover/v5wire/encoding.go
index ec5ef8a261..08292a70ba 100644
--- a/p2p/discover/v5wire/encoding.go
+++ b/p2p/discover/v5wire/encoding.go
@@ -27,6 +27,7 @@ import (
"errors"
"fmt"
"hash"
+ "slices"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -222,7 +223,7 @@ func (c *Codec) Encode(id enode.ID, addr string, packet Packet, challenge *Whoar
// Store sent WHOAREYOU challenges.
if challenge, ok := packet.(*Whoareyou); ok {
- challenge.ChallengeData = bytesCopy(&c.buf)
+ challenge.ChallengeData = slices.Clone(c.buf.Bytes())
enc, err := c.EncodeRaw(id, head, msgData)
if err != nil {
return nil, Nonce{}, err
@@ -325,7 +326,7 @@ func (c *Codec) encodeWhoareyou(toID enode.ID, packet *Whoareyou) (Header, error
// Create header.
head := c.makeHeader(toID, flagWhoareyou, 0)
- head.AuthData = bytesCopy(&c.buf)
+ head.AuthData = slices.Clone(c.buf.Bytes())
head.Nonce = packet.Nonce
// Encode auth data.
@@ -430,7 +431,7 @@ func (c *Codec) encodeMessageHeader(toID enode.ID, s *session) (Header, error) {
auth := messageAuthData{SrcID: c.localnode.ID()}
c.buf.Reset()
binary.Write(&c.buf, binary.BigEndian, &auth)
- head.AuthData = bytesCopy(&c.buf)
+ head.AuthData = slices.Clone(c.buf.Bytes())
head.Nonce = nonce
return head, err
}
@@ -686,9 +687,3 @@ func (h *Header) mask(destID enode.ID) cipher.Stream {
}
return cipher.NewCTR(block, h.IV[:])
}
-
-func bytesCopy(r *bytes.Buffer) []byte {
- b := make([]byte, r.Len())
- copy(b, r.Bytes())
- return b
-}
diff --git a/p2p/enode/iter.go b/p2p/enode/iter.go
index f8f79a9436..4890321f49 100644
--- a/p2p/enode/iter.go
+++ b/p2p/enode/iter.go
@@ -38,7 +38,7 @@ type SourceIterator interface {
NodeSource() string // source of current node
}
-// WithSource attaches a 'source name' to an iterator.
+// WithSourceName attaches a 'source name' to an iterator.
func WithSourceName(name string, it Iterator) SourceIterator {
return sourceIter{it, name}
}
diff --git a/p2p/enode/localnode.go b/p2p/enode/localnode.go
index 6e79c9cbdc..6425560b02 100644
--- a/p2p/enode/localnode.go
+++ b/p2p/enode/localnode.go
@@ -45,7 +45,7 @@ const (
// current process. Setting ENR entries via the Set method updates the record. A new version
// of the record is signed on demand when the Node method is called.
type LocalNode struct {
- cur atomic.Value // holds a non-nil node pointer while the record is up-to-date
+ cur atomic.Pointer[Node] // holds a non-nil node pointer while the record is up-to-date
id ID
key *ecdsa.PrivateKey
@@ -82,7 +82,7 @@ func NewLocalNode(db *DB, key *ecdsa.PrivateKey) *LocalNode {
}
ln.seq = db.localSeq(ln.id)
ln.update = time.Now()
- ln.cur.Store((*Node)(nil))
+ ln.cur.Store(nil)
return ln
}
@@ -94,7 +94,7 @@ func (ln *LocalNode) Database() *DB {
// Node returns the current version of the local node record.
func (ln *LocalNode) Node() *Node {
// If we have a valid record, return that
- n := ln.cur.Load().(*Node)
+ n := ln.cur.Load()
if n != nil {
return n
}
@@ -105,7 +105,7 @@ func (ln *LocalNode) Node() *Node {
// Double check the current record, since multiple goroutines might be waiting
// on the write mutex.
- if n = ln.cur.Load().(*Node); n != nil {
+ if n = ln.cur.Load(); n != nil {
return n
}
@@ -121,7 +121,7 @@ func (ln *LocalNode) Node() *Node {
ln.sign()
ln.update = time.Now()
- return ln.cur.Load().(*Node)
+ return ln.cur.Load()
}
// Seq returns the current sequence number of the local node record.
@@ -276,11 +276,11 @@ func (e *lnEndpoint) get() (newIP net.IP, newPort uint16) {
}
func (ln *LocalNode) invalidate() {
- ln.cur.Store((*Node)(nil))
+ ln.cur.Store(nil)
}
func (ln *LocalNode) sign() {
- if n := ln.cur.Load().(*Node); n != nil {
+ if n := ln.cur.Load(); n != nil {
return // no changes
}
@@ -305,12 +305,3 @@ func (ln *LocalNode) bumpSeq() {
ln.seq++
ln.db.storeLocalSeq(ln.id, ln.seq)
}
-
-// nowMilliseconds gives the current timestamp at millisecond precision.
-func nowMilliseconds() uint64 {
- ns := time.Now().UnixNano()
- if ns < 0 {
- return 0
- }
- return uint64(ns / 1000 / 1000)
-}
diff --git a/p2p/enode/localnode_test.go b/p2p/enode/localnode_test.go
index f90e0b2f1d..121244afea 100644
--- a/p2p/enode/localnode_test.go
+++ b/p2p/enode/localnode_test.go
@@ -21,6 +21,7 @@ import (
"net"
"net/netip"
"testing"
+ "time"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p/enr"
@@ -53,7 +54,7 @@ func TestLocalNode(t *testing.T) {
// This test checks that the sequence number is persisted between restarts.
func TestLocalNodeSeqPersist(t *testing.T) {
- timestamp := nowMilliseconds()
+ timestamp := uint64(time.Now().UnixMilli())
ln, db := newLocalNodeForTesting()
defer db.Close()
diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go
index 51e554e68a..2cd211e2c2 100644
--- a/p2p/enode/nodedb.go
+++ b/p2p/enode/nodedb.go
@@ -434,7 +434,7 @@ func (db *DB) localSeq(id ID) uint64 {
if seq := db.fetchUint64(localItemKey(id, dbLocalSeq)); seq > 0 {
return seq
}
- return nowMilliseconds()
+ return uint64(time.Now().UnixMilli())
}
// storeLocalSeq stores the local record sequence counter.
diff --git a/p2p/msgrate/msgrate.go b/p2p/msgrate/msgrate.go
index db53a918bf..7842dde79f 100644
--- a/p2p/msgrate/msgrate.go
+++ b/p2p/msgrate/msgrate.go
@@ -171,8 +171,7 @@ func (t *Tracker) Capacity(kind uint64, targetRTT time.Duration) int {
// roundCapacity gives the integer value of a capacity.
// The result fits int32, and is guaranteed to be positive.
func roundCapacity(cap float64) int {
- const maxInt32 = float64(1<<31 - 1)
- return int(math.Min(maxInt32, math.Max(1, math.Ceil(cap))))
+ return int(min(math.MaxInt32, max(1, math.Ceil(cap))))
}
// Update modifies the peer's capacity values for a specific data type with a new
diff --git a/p2p/peer.go b/p2p/peer.go
index 9a0a750ac8..5521889f30 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -173,7 +173,6 @@ func (p *Peer) Fullname() string {
// Caps returns the capabilities (supported subprotocols) of the remote peer.
func (p *Peer) Caps() []Cap {
- // TODO: maybe return copy
return p.rw.caps
}
diff --git a/params/config.go b/params/config.go
index 50e5706551..b79de80435 100644
--- a/params/config.go
+++ b/params/config.go
@@ -68,11 +68,17 @@ var (
ShanghaiTime: newUint64(1681338455),
CancunTime: newUint64(1710338135),
PragueTime: newUint64(1746612311),
+ OsakaTime: newUint64(1764798551),
+ BPO1Time: newUint64(1765290071),
+ BPO2Time: newUint64(1767747671),
DepositContractAddress: common.HexToAddress("0x00000000219ab540356cbb839cbe05303d7705fa"),
Ethash: new(EthashConfig),
BlobScheduleConfig: &BlobScheduleConfig{
Cancun: DefaultCancunBlobConfig,
Prague: DefaultPragueBlobConfig,
+ Osaka: DefaultOsakaBlobConfig,
+ BPO1: DefaultBPO1BlobConfig,
+ BPO2: DefaultBPO2BlobConfig,
},
}
// HoleskyChainConfig contains the chain parameters to run a node on the Holesky test network.
@@ -98,11 +104,17 @@ var (
ShanghaiTime: newUint64(1696000704),
CancunTime: newUint64(1707305664),
PragueTime: newUint64(1740434112),
+ OsakaTime: newUint64(1759308480),
+ BPO1Time: newUint64(1759800000),
+ BPO2Time: newUint64(1760389824),
DepositContractAddress: common.HexToAddress("0x4242424242424242424242424242424242424242"),
Ethash: new(EthashConfig),
BlobScheduleConfig: &BlobScheduleConfig{
Cancun: DefaultCancunBlobConfig,
Prague: DefaultPragueBlobConfig,
+ Osaka: DefaultOsakaBlobConfig,
+ BPO1: DefaultBPO1BlobConfig,
+ BPO2: DefaultBPO2BlobConfig,
},
}
// SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network.
@@ -128,11 +140,17 @@ var (
ShanghaiTime: newUint64(1677557088),
CancunTime: newUint64(1706655072),
PragueTime: newUint64(1741159776),
+ OsakaTime: newUint64(1760427360),
+ BPO1Time: newUint64(1761017184),
+ BPO2Time: newUint64(1761607008),
DepositContractAddress: common.HexToAddress("0x7f02c3e3c98b133055b8b348b2ac625669ed295d"),
Ethash: new(EthashConfig),
BlobScheduleConfig: &BlobScheduleConfig{
Cancun: DefaultCancunBlobConfig,
Prague: DefaultPragueBlobConfig,
+ Osaka: DefaultOsakaBlobConfig,
+ BPO1: DefaultBPO1BlobConfig,
+ BPO2: DefaultBPO2BlobConfig,
},
}
// HoodiChainConfig contains the chain parameters to run a node on the Hoodi test network.
@@ -158,11 +176,17 @@ var (
ShanghaiTime: newUint64(0),
CancunTime: newUint64(0),
PragueTime: newUint64(1742999832),
+ OsakaTime: newUint64(1761677592),
+ BPO1Time: newUint64(1762365720),
+ BPO2Time: newUint64(1762955544),
DepositContractAddress: common.HexToAddress("0x00000000219ab540356cBB839Cbe05303d7705Fa"),
Ethash: new(EthashConfig),
BlobScheduleConfig: &BlobScheduleConfig{
Cancun: DefaultCancunBlobConfig,
Prague: DefaultPragueBlobConfig,
+ Osaka: DefaultOsakaBlobConfig,
+ BPO1: DefaultBPO1BlobConfig,
+ BPO2: DefaultBPO2BlobConfig,
},
}
// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
@@ -359,6 +383,7 @@ var (
OptimismTestConfig = func() *ChainConfig {
conf := *MergedTestChainConfig // copy the config
conf.BlobScheduleConfig = nil
+ conf.OsakaTime = nil // needs to be removed when production fork introduces Osaka
conf.BedrockBlock = big.NewInt(0)
zero := uint64(0)
conf.RegolithTime = &zero
@@ -368,9 +393,9 @@ var (
conf.GraniteTime = &zero
conf.HoloceneTime = &zero
conf.IsthmusTime = &zero
- conf.InteropTime = nil
conf.JovianTime = nil
- conf.Optimism = &OptimismConfig{EIP1559Elasticity: 50, EIP1559Denominator: 10, EIP1559DenominatorCanyon: uint64ptr(250)}
+ conf.InteropTime = nil
+ conf.Optimism = &OptimismConfig{EIP1559Elasticity: 6, EIP1559Denominator: 50, EIP1559DenominatorCanyon: uint64ptr(250)}
return &conf
}()
)
@@ -394,6 +419,30 @@ var (
Max: 9,
UpdateFraction: 5007716,
}
+ // DefaultBPO1BlobConfig is the default blob configuration for the BPO1 fork.
+ DefaultBPO1BlobConfig = &BlobConfig{
+ Target: 10,
+ Max: 15,
+ UpdateFraction: 8346193,
+ }
+ // DefaultBPO2BlobConfig is the default blob configuration for the BPO2 fork.
+ DefaultBPO2BlobConfig = &BlobConfig{
+ Target: 14,
+ Max: 21,
+ UpdateFraction: 11684671,
+ }
+ // DefaultBPO3BlobConfig is the default blob configuration for the BPO3 fork.
+ DefaultBPO3BlobConfig = &BlobConfig{
+ Target: 21,
+ Max: 32,
+ UpdateFraction: 20609697,
+ }
+ // DefaultBPO4BlobConfig is the default blob configuration for the BPO4 fork.
+ DefaultBPO4BlobConfig = &BlobConfig{
+ Target: 14,
+ Max: 21,
+ UpdateFraction: 13739630,
+ }
// DefaultBlobSchedule is the latest configured blob schedule for Ethereum mainnet.
DefaultBlobSchedule = &BlobScheduleConfig{
Cancun: DefaultCancunBlobConfig,
@@ -1205,6 +1254,16 @@ func (c *ChainConfig) LatestFork(time uint64) forks.Fork {
london := c.LondonBlock
switch {
+ case c.IsBPO5(london, time):
+ return forks.BPO5
+ case c.IsBPO4(london, time):
+ return forks.BPO4
+ case c.IsBPO3(london, time):
+ return forks.BPO3
+ case c.IsBPO2(london, time):
+ return forks.BPO2
+ case c.IsBPO1(london, time):
+ return forks.BPO1
case c.IsOsaka(london, time):
return forks.Osaka
case c.IsPrague(london, time):
@@ -1218,10 +1277,66 @@ func (c *ChainConfig) LatestFork(time uint64) forks.Fork {
}
}
+// BlobConfig returns the blob config associated with the provided fork.
+func (c *ChainConfig) BlobConfig(fork forks.Fork) *BlobConfig {
+ // TODO: https://github.com/ethereum-optimism/op-geth/issues/685
+ // This function has a bug.
+ switch fork {
+ case forks.BPO5:
+ return c.BlobScheduleConfig.BPO5
+ case forks.BPO4:
+ return c.BlobScheduleConfig.BPO4
+ case forks.BPO3:
+ return c.BlobScheduleConfig.BPO3
+ case forks.BPO2:
+ return c.BlobScheduleConfig.BPO2
+ case forks.BPO1:
+ return c.BlobScheduleConfig.BPO1
+ case forks.Osaka:
+ return c.BlobScheduleConfig.Osaka
+ case forks.Prague:
+ return c.BlobScheduleConfig.Prague
+ case forks.Cancun:
+ return c.BlobScheduleConfig.Cancun
+ default:
+ return nil
+ }
+}
+
+// ActiveSystemContracts returns the currently active system contracts at the
+// given timestamp.
+func (c *ChainConfig) ActiveSystemContracts(time uint64) map[string]common.Address {
+ fork := c.LatestFork(time)
+ active := make(map[string]common.Address)
+ if fork >= forks.Osaka {
+ // no new system contracts
+ }
+ if fork >= forks.Prague {
+ active["CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS"] = ConsolidationQueueAddress
+ active["DEPOSIT_CONTRACT_ADDRESS"] = c.DepositContractAddress
+ active["HISTORY_STORAGE_ADDRESS"] = HistoryStorageAddress
+ active["WITHDRAWAL_REQUEST_PREDEPLOY_ADDRESS"] = WithdrawalQueueAddress
+ }
+ if fork >= forks.Cancun {
+ active["BEACON_ROOTS_ADDRESS"] = BeaconRootsAddress
+ }
+ return active
+}
+
// Timestamp returns the timestamp associated with the fork or returns nil if
// the fork isn't defined or isn't a time-based fork.
func (c *ChainConfig) Timestamp(fork forks.Fork) *uint64 {
switch {
+ case fork == forks.BPO5:
+ return c.BPO5Time
+ case fork == forks.BPO4:
+ return c.BPO4Time
+ case fork == forks.BPO3:
+ return c.BPO3Time
+ case fork == forks.BPO2:
+ return c.BPO2Time
+ case fork == forks.BPO1:
+ return c.BPO1Time
case fork == forks.Osaka:
return c.OsakaTime
case fork == forks.Prague:
diff --git a/params/forks/forks.go b/params/forks/forks.go
index 5c9612a625..aab0a54ab7 100644
--- a/params/forks/forks.go
+++ b/params/forks/forks.go
@@ -40,6 +40,11 @@ const (
Cancun
Prague
Osaka
+ BPO1
+ BPO2
+ BPO3
+ BPO4
+ BPO5
)
// String implements fmt.Stringer.
diff --git a/params/optimism_feature_toggles.go b/params/optimism_feature_toggles.go
new file mode 100644
index 0000000000..6113844af0
--- /dev/null
+++ b/params/optimism_feature_toggles.go
@@ -0,0 +1,17 @@
+package params
+
+// OPStack diff
+// This file contains ephemeral feature toggles which should be removed
+// after the fork scope is locked.
+
+func (c *ChainConfig) IsMinBaseFee(time uint64) bool {
+ return c.IsJovian(time) // Replace with return false to disable
+}
+
+func (c *ChainConfig) IsDAFootprintBlockLimit(time uint64) bool {
+ return c.IsJovian(time) // Replace with return false to disable
+}
+
+func (c *ChainConfig) IsOperatorFeeFix(time uint64) bool {
+ return c.IsJovian(time) // Replace with return false to disable
+}
diff --git a/params/protocol_params.go b/params/protocol_params.go
index fd36db350f..cd49a521e0 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -185,6 +185,11 @@ const (
Bls12381G2MulMaxInputSizeIsthmus uint64 = 488448 // Maximum input size for BLS12-381 G2 multiple-scalar-multiply operation
Bls12381PairingMaxInputSizeIsthmus uint64 = 235008 // Maximum input size for BLS12-381 pairing check
+ Bn256PairingMaxInputSizeJovian uint64 = 81984 // bn256Pairing limit (427 pairs)
+ Bls12381G1MulMaxInputSizeJovian uint64 = 288960 // BLS12-381 G1 MSM limit (1,806 pairs)
+ Bls12381G2MulMaxInputSizeJovian uint64 = 278784 // BLS12-381 G2 MSM limit (968 pairs)
+ Bls12381PairingMaxInputSizeJovian uint64 = 156672 // BLS12-381 pairing limit (408 pairs)
+
// The Refund Quotient is the cap on how much of the used gas can be refunded. Before EIP-3529,
// up to half the consumed gas could be refunded. Redefined as 1/5th in EIP-3529
RefundQuotient uint64 = 2
@@ -198,7 +203,7 @@ const (
BlobTxMaxBlobs = 6
BlobBaseCost = 1 << 13 // Base execution gas cost for a blob.
- HistoryServeWindow = 8192 // Number of blocks to serve historical block hashes for, EIP-2935.
+ HistoryServeWindow = 8191 // Number of blocks to serve historical block hashes for, EIP-2935.
MaxBlockSize = 8_388_608 // maximum size of an RLP-encoded block
)
diff --git a/rlp/decode.go b/rlp/decode.go
index 0fbca243ee..19074072fb 100644
--- a/rlp/decode.go
+++ b/rlp/decode.go
@@ -148,9 +148,9 @@ func addErrorContext(err error, ctx string) error {
}
var (
- decoderInterface = reflect.TypeOf(new(Decoder)).Elem()
- bigInt = reflect.TypeOf(big.Int{})
- u256Int = reflect.TypeOf(uint256.Int{})
+ decoderInterface = reflect.TypeFor[Decoder]()
+ bigInt = reflect.TypeFor[big.Int]()
+ u256Int = reflect.TypeFor[uint256.Int]()
)
func makeDecoder(typ reflect.Type, tags rlpstruct.Tags) (dec decoder, err error) {
@@ -371,7 +371,7 @@ func decodeByteArray(s *Stream, val reflect.Value) error {
if err != nil {
return err
}
- slice := byteArrayBytes(val, val.Len())
+ slice := val.Bytes()
switch kind {
case Byte:
if len(slice) == 0 {
@@ -512,7 +512,7 @@ func makeNilPtrDecoder(etype reflect.Type, etypeinfo *typeinfo, ts rlpstruct.Tag
}
}
-var ifsliceType = reflect.TypeOf([]interface{}{})
+var ifsliceType = reflect.TypeFor[[]any]()
func decodeInterface(s *Stream, val reflect.Value) error {
if val.Type().NumMethod() != 0 {
diff --git a/rlp/encbuffer.go b/rlp/encbuffer.go
index 8d3a3b2293..61d8bd059c 100644
--- a/rlp/encbuffer.go
+++ b/rlp/encbuffer.go
@@ -23,6 +23,7 @@ import (
"reflect"
"sync"
+ "github.com/ethereum/go-ethereum/common/math"
"github.com/holiman/uint256"
)
@@ -145,9 +146,6 @@ func (buf *encBuffer) writeString(s string) {
buf.writeBytes([]byte(s))
}
-// wordBytes is the number of bytes in a big.Word
-const wordBytes = (32 << (uint64(^big.Word(0)) >> 63)) / 8
-
// writeBigInt writes i as an integer.
func (buf *encBuffer) writeBigInt(i *big.Int) {
bitlen := i.BitLen()
@@ -161,15 +159,8 @@ func (buf *encBuffer) writeBigInt(i *big.Int) {
length := ((bitlen + 7) & -8) >> 3
buf.encodeStringHeader(length)
buf.str = append(buf.str, make([]byte, length)...)
- index := length
bytesBuf := buf.str[len(buf.str)-length:]
- for _, d := range i.Bits() {
- for j := 0; j < wordBytes && index > 0; j++ {
- index--
- bytesBuf[index] = byte(d)
- d >>= 8
- }
- }
+ math.ReadBits(i, bytesBuf)
}
// writeUint256 writes z as an integer.
diff --git a/rlp/encode.go b/rlp/encode.go
index 3645bbfda0..ed99275739 100644
--- a/rlp/encode.go
+++ b/rlp/encode.go
@@ -21,6 +21,7 @@ import (
"fmt"
"io"
"math/big"
+ "math/bits"
"reflect"
"github.com/ethereum/go-ethereum/rlp/internal/rlpstruct"
@@ -133,7 +134,7 @@ func puthead(buf []byte, smalltag, largetag byte, size uint64) int {
return sizesize + 1
}
-var encoderInterface = reflect.TypeOf(new(Encoder)).Elem()
+var encoderInterface = reflect.TypeFor[Encoder]()
// makeWriter creates a writer function for the given type.
func makeWriter(typ reflect.Type, ts rlpstruct.Tags) (writer, error) {
@@ -239,7 +240,6 @@ func makeByteArrayWriter(typ reflect.Type) writer {
case 1:
return writeLengthOneByteArray
default:
- length := typ.Len()
return func(val reflect.Value, w *encBuffer) error {
if !val.CanAddr() {
// Getting the byte slice of val requires it to be addressable. Make it
@@ -248,7 +248,7 @@ func makeByteArrayWriter(typ reflect.Type) writer {
copy.Set(val)
val = copy
}
- slice := byteArrayBytes(val, length)
+ slice := val.Bytes()
w.encodeStringHeader(len(slice))
w.str = append(w.str, slice...)
return nil
@@ -487,9 +487,8 @@ func putint(b []byte, i uint64) (size int) {
// intsize computes the minimum number of bytes required to store i.
func intsize(i uint64) (size int) {
- for size = 1; ; size++ {
- if i >>= 8; i == 0 {
- return size
- }
+ if i == 0 {
+ return 1
}
+ return (bits.Len64(i) + 7) / 8
}
diff --git a/rlp/raw.go b/rlp/raw.go
index 879e3bfe5d..cec90346a1 100644
--- a/rlp/raw.go
+++ b/rlp/raw.go
@@ -26,7 +26,7 @@ import (
// not verify whether the content of RawValues is valid RLP.
type RawValue []byte
-var rawValueType = reflect.TypeOf(RawValue{})
+var rawValueType = reflect.TypeFor[RawValue]()
// StringSize returns the encoded size of a string.
func StringSize(s string) uint64 {
diff --git a/rpc/server.go b/rpc/server.go
index 3243882886..417c957e1e 100644
--- a/rpc/server.go
+++ b/rpc/server.go
@@ -54,6 +54,7 @@ type Server struct {
batchItemLimit int
batchResponseLimit int
httpBodyLimit int
+ wsReadLimit int64
recorder Recorder // optional, may be nil
}
@@ -64,6 +65,7 @@ func NewServer() *Server {
idgen: randomIDGenerator(),
codecs: make(map[ServerCodec]struct{}),
httpBodyLimit: defaultBodyLimit,
+ wsReadLimit: wsDefaultReadLimit,
}
server.run.Store(true)
// Register the default service providing meta information about the RPC service such
@@ -95,6 +97,13 @@ func (s *Server) SetHTTPBodyLimit(limit int) {
s.httpBodyLimit = limit
}
+// SetWebsocketReadLimit sets the limit for max message size for Websocket requests.
+//
+// This method should be called before processing any requests via Websocket server.
+func (s *Server) SetWebsocketReadLimit(limit int64) {
+ s.wsReadLimit = limit
+}
+
// RegisterName creates a service for the given receiver type under the given name. When no
// methods on the given receiver match the criteria to be either an RPC method or a
// subscription an error is returned. Otherwise a new service is created and added to the
diff --git a/rpc/server_test.go b/rpc/server_test.go
index 9ee545d81a..a38a64b080 100644
--- a/rpc/server_test.go
+++ b/rpc/server_test.go
@@ -19,13 +19,18 @@ package rpc
import (
"bufio"
"bytes"
+ "context"
+ "errors"
"io"
"net"
+ "net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
"time"
+
+ "github.com/gorilla/websocket"
)
func TestServerRegisterName(t *testing.T) {
@@ -202,3 +207,86 @@ func TestServerBatchResponseSizeLimit(t *testing.T) {
}
}
}
+
+func TestServerWebsocketReadLimit(t *testing.T) {
+ t.Parallel()
+
+ // Test different read limits
+ testCases := []struct {
+ name string
+ readLimit int64
+ testSize int
+ shouldFail bool
+ }{
+ {
+ name: "limit with small request - should succeed",
+ readLimit: 4096, // generous limit to comfortably allow JSON overhead
+ testSize: 256, // reasonably small payload
+ shouldFail: false,
+ },
+ {
+ name: "limit with large request - should fail",
+ readLimit: 256, // tight limit to trigger server-side read limit
+ testSize: 1024, // payload that will exceed the limit including JSON overhead
+ shouldFail: true,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ // Create server and set read limits
+ srv := newTestServer()
+ srv.SetWebsocketReadLimit(tc.readLimit)
+ defer srv.Stop()
+
+ // Start HTTP server with WebSocket handler
+ httpsrv := httptest.NewServer(srv.WebsocketHandler([]string{"*"}))
+ defer httpsrv.Close()
+
+ wsURL := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:")
+
+ // Connect WebSocket client
+ client, err := DialOptions(context.Background(), wsURL)
+ if err != nil {
+ t.Fatalf("can't dial: %v", err)
+ }
+ defer client.Close()
+
+ // Create large request data - this is what will be limited
+ largeString := strings.Repeat("A", tc.testSize)
+
+ // Send the large string as a parameter in the request
+ var result echoResult
+ err = client.Call(&result, "test_echo", largeString, 42, &echoArgs{S: "test"})
+
+ if tc.shouldFail {
+ // Expecting an error due to read limit exceeded
+ if err == nil {
+ t.Fatalf("expected error for request size %d with limit %d, but got none", tc.testSize, tc.readLimit)
+ }
+ // Be tolerant about the exact error surfaced by gorilla/websocket.
+ // Prefer a CloseError with code 1009, but accept ErrReadLimit or an error string containing 1009/message too big.
+ var cerr *websocket.CloseError
+ if errors.As(err, &cerr) {
+ if cerr.Code != websocket.CloseMessageTooBig {
+ t.Fatalf("unexpected websocket close code: have %d want %d (err=%v)", cerr.Code, websocket.CloseMessageTooBig, err)
+ }
+ } else if !errors.Is(err, websocket.ErrReadLimit) &&
+ !strings.Contains(strings.ToLower(err.Error()), "1009") &&
+ !strings.Contains(strings.ToLower(err.Error()), "message too big") {
+ // Not the error we expect from exceeding the message size limit.
+ t.Fatalf("unexpected error for read limit violation: %v", err)
+ }
+ } else {
+ // Expecting success
+ if err != nil {
+ t.Fatalf("unexpected error for request size %d with limit %d: %v", tc.testSize, tc.readLimit, err)
+ }
+ // Verify the response is correct - the echo should return our string
+ if result.String != largeString {
+ t.Fatalf("expected echo result to match input")
+ }
+ }
+ })
+ }
+}
diff --git a/rpc/service.go b/rpc/service.go
index d50090e9fb..0f62d7eb7c 100644
--- a/rpc/service.go
+++ b/rpc/service.go
@@ -29,10 +29,10 @@ import (
)
var (
- contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
- errorType = reflect.TypeOf((*error)(nil)).Elem()
- subscriptionType = reflect.TypeOf(Subscription{})
- stringType = reflect.TypeOf("")
+ contextType = reflect.TypeFor[context.Context]()
+ errorType = reflect.TypeFor[error]()
+ subscriptionType = reflect.TypeFor[Subscription]()
+ stringType = reflect.TypeFor[string]()
)
type serviceRegistry struct {
diff --git a/rpc/websocket.go b/rpc/websocket.go
index 9f67caf859..543ff617ba 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -60,7 +60,7 @@ func (s *Server) WebsocketHandler(allowedOrigins []string) http.Handler {
log.Debug("WebSocket upgrade failed", "err", err)
return
}
- codec := newWebsocketCodec(conn, r.Host, r.Header, wsDefaultReadLimit)
+ codec := newWebsocketCodec(conn, r.Host, r.Header, s.wsReadLimit)
s.ServeCodec(codec, 0)
})
}
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
index a8d8624900..3b7d5a9da0 100644
--- a/rpc/websocket_test.go
+++ b/rpc/websocket_test.go
@@ -121,56 +121,49 @@ func TestWebsocketLargeRead(t *testing.T) {
srv = newTestServer()
httpsrv = httptest.NewServer(srv.WebsocketHandler([]string{"*"}))
wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:")
+ buffer = 64
)
defer srv.Stop()
defer httpsrv.Close()
- testLimit := func(limit *int64) {
- opts := []ClientOption{}
- expLimit := int64(wsDefaultReadLimit)
- if limit != nil && *limit >= 0 {
- opts = append(opts, WithWebsocketMessageSizeLimit(*limit))
- if *limit > 0 {
- expLimit = *limit // 0 means infinite
+ for _, tt := range []struct {
+ size int
+ limit int
+ err bool
+ }{
+ {200, 200, false}, // Small, successful request and limit
+ {2048, 1024, true}, // Normal, failed request
+ {wsDefaultReadLimit + buffer, 0, false}, // Large, successful request, infinite limit
+ } {
+ func() {
+ if tt.limit != 0 {
+ // Some buffer is added to the limit to account for JSON encoding. It's
+ // skipped when the limit is zero since the intention is for the limit
+ // to be infinite.
+ tt.limit += buffer
}
- }
- client, err := DialOptions(context.Background(), wsURL, opts...)
- if err != nil {
- t.Fatalf("can't dial: %v", err)
- }
- defer client.Close()
- // Remove some bytes for json encoding overhead.
- underLimit := int(expLimit - 128)
- overLimit := expLimit + 1
- if expLimit == wsDefaultReadLimit {
- // No point trying the full 32MB in tests. Just sanity-check that
- // it's not obviously limited.
- underLimit = 1024
- overLimit = -1
- }
- var res string
- // Check under limit
- if err = client.Call(&res, "test_repeat", "A", underLimit); err != nil {
- t.Fatalf("unexpected error with limit %d: %v", expLimit, err)
- }
- if len(res) != underLimit || strings.Count(res, "A") != underLimit {
- t.Fatal("incorrect data")
- }
- // Check over limit
- if overLimit > 0 {
- err = client.Call(&res, "test_repeat", "A", expLimit+1)
- if err == nil || err != websocket.ErrReadLimit {
- t.Fatalf("wrong error with limit %d: %v expecting %v", expLimit, err, websocket.ErrReadLimit)
+ opts := []ClientOption{WithWebsocketMessageSizeLimit(int64(tt.limit))}
+ client, err := DialOptions(context.Background(), wsURL, opts...)
+ if err != nil {
+ t.Fatalf("failed to dial test server: %v", err)
}
- }
- }
- ptr := func(v int64) *int64 { return &v }
+ defer client.Close()
- testLimit(ptr(-1)) // Should be ignored (use default)
- testLimit(ptr(0)) // Should be ignored (use default)
- testLimit(nil) // Should be ignored (use default)
- testLimit(ptr(200))
- testLimit(ptr(wsDefaultReadLimit * 2))
+ var res string
+ err = client.Call(&res, "test_repeat", "A", tt.size)
+ if tt.err && err == nil {
+ t.Fatalf("expected error, got none")
+ }
+ if !tt.err {
+ if err != nil {
+ t.Fatalf("unexpected error with limit %d: %v", tt.limit, err)
+ }
+ if strings.Count(res, "A") != tt.size {
+ t.Fatal("incorrect data")
+ }
+ }
+ }()
+ }
}
func TestWebsocketPeerInfo(t *testing.T) {
diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go
index b5fd5a2854..9034e7e9ca 100644
--- a/signer/core/apitypes/types.go
+++ b/signer/core/apitypes/types.go
@@ -167,8 +167,11 @@ func (args *SendTxArgs) ToTransaction() (*types.Transaction, error) {
BlobFeeCap: uint256.MustFromBig((*big.Int)(args.BlobFeeCap)),
}
if args.Blobs != nil {
- // TODO(rjl493456442, marius) support V1
- data.(*types.BlobTx).Sidecar = types.NewBlobTxSidecar(types.BlobSidecarVersion0, args.Blobs, args.Commitments, args.Proofs)
+ version := types.BlobSidecarVersion0
+ if len(args.Proofs) == len(args.Blobs)*kzg4844.CellProofsPerBlob {
+ version = types.BlobSidecarVersion1
+ }
+ data.(*types.BlobTx).Sidecar = types.NewBlobTxSidecar(version, args.Blobs, args.Commitments, args.Proofs)
}
case args.MaxFeePerGas != nil:
@@ -544,7 +547,7 @@ func parseBytes(encType interface{}) ([]byte, bool) {
// Handle array types.
val := reflect.ValueOf(encType)
if val.Kind() == reflect.Array && val.Type().Elem().Kind() == reflect.Uint8 {
- v := reflect.MakeSlice(reflect.TypeOf([]byte{}), val.Len(), val.Len())
+ v := reflect.ValueOf(make([]byte, val.Len()))
reflect.Copy(v, val)
return v.Bytes(), true
}
diff --git a/superchain-registry-commit.txt b/superchain-registry-commit.txt
index 5ce6d82034..b5243710e5 100644
--- a/superchain-registry-commit.txt
+++ b/superchain-registry-commit.txt
@@ -1 +1 @@
-d56233c1e5254fc2fd769d5b33269502a1fe9ef8
\ No newline at end of file
+720185c32b0599fa31b14f101cbc990ec39c0a36
diff --git a/superchain/superchain-configs.zip b/superchain/superchain-configs.zip
index c9eb8f0a12..c0317192e5 100644
Binary files a/superchain/superchain-configs.zip and b/superchain/superchain-configs.zip differ
diff --git a/tests/init.go b/tests/init.go
index a8bc424fa2..705e929ae9 100644
--- a/tests/init.go
+++ b/tests/init.go
@@ -464,6 +464,274 @@ var Forks = map[string]*params.ChainConfig{
Osaka: params.DefaultOsakaBlobConfig,
},
},
+ "BPO1": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ MergeNetsplitBlock: big.NewInt(0),
+ TerminalTotalDifficulty: big.NewInt(0),
+ ShanghaiTime: u64(0),
+ CancunTime: u64(0),
+ PragueTime: u64(0),
+ OsakaTime: u64(0),
+ BPO1Time: u64(0),
+ DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
+ BlobScheduleConfig: ¶ms.BlobScheduleConfig{
+ Cancun: params.DefaultCancunBlobConfig,
+ Prague: params.DefaultPragueBlobConfig,
+ Osaka: params.DefaultOsakaBlobConfig,
+ BPO1: bpo1BlobConfig,
+ },
+ },
+ "OsakaToBPO1AtTime15k": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ MergeNetsplitBlock: big.NewInt(0),
+ TerminalTotalDifficulty: big.NewInt(0),
+ ShanghaiTime: u64(0),
+ CancunTime: u64(0),
+ PragueTime: u64(0),
+ OsakaTime: u64(0),
+ BPO1Time: u64(15_000),
+ DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
+ BlobScheduleConfig: ¶ms.BlobScheduleConfig{
+ Cancun: params.DefaultCancunBlobConfig,
+ Prague: params.DefaultPragueBlobConfig,
+ Osaka: params.DefaultOsakaBlobConfig,
+ BPO1: bpo1BlobConfig,
+ },
+ },
+ "BPO2": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ MergeNetsplitBlock: big.NewInt(0),
+ TerminalTotalDifficulty: big.NewInt(0),
+ ShanghaiTime: u64(0),
+ CancunTime: u64(0),
+ PragueTime: u64(0),
+ OsakaTime: u64(0),
+ BPO1Time: u64(0),
+ BPO2Time: u64(0),
+ DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
+ BlobScheduleConfig: ¶ms.BlobScheduleConfig{
+ Cancun: params.DefaultCancunBlobConfig,
+ Prague: params.DefaultPragueBlobConfig,
+ Osaka: params.DefaultOsakaBlobConfig,
+ BPO1: bpo1BlobConfig,
+ BPO2: bpo2BlobConfig,
+ },
+ },
+ "BPO1ToBPO2AtTime15k": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ MergeNetsplitBlock: big.NewInt(0),
+ TerminalTotalDifficulty: big.NewInt(0),
+ ShanghaiTime: u64(0),
+ CancunTime: u64(0),
+ PragueTime: u64(0),
+ OsakaTime: u64(0),
+ BPO1Time: u64(0),
+ BPO2Time: u64(15_000),
+ DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
+ BlobScheduleConfig: ¶ms.BlobScheduleConfig{
+ Cancun: params.DefaultCancunBlobConfig,
+ Prague: params.DefaultPragueBlobConfig,
+ Osaka: params.DefaultOsakaBlobConfig,
+ BPO1: bpo1BlobConfig,
+ BPO2: bpo2BlobConfig,
+ },
+ },
+ "BPO3": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ MergeNetsplitBlock: big.NewInt(0),
+ TerminalTotalDifficulty: big.NewInt(0),
+ ShanghaiTime: u64(0),
+ CancunTime: u64(0),
+ PragueTime: u64(0),
+ OsakaTime: u64(0),
+ BPO1Time: u64(0),
+ BPO2Time: u64(0),
+ BPO3Time: u64(0),
+ DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
+ BlobScheduleConfig: ¶ms.BlobScheduleConfig{
+ Cancun: params.DefaultCancunBlobConfig,
+ Prague: params.DefaultPragueBlobConfig,
+ Osaka: params.DefaultOsakaBlobConfig,
+ BPO1: bpo1BlobConfig,
+ BPO2: bpo2BlobConfig,
+ BPO3: params.DefaultBPO3BlobConfig,
+ },
+ },
+ "BPO2ToBPO3AtTime15k": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ MergeNetsplitBlock: big.NewInt(0),
+ TerminalTotalDifficulty: big.NewInt(0),
+ ShanghaiTime: u64(0),
+ CancunTime: u64(0),
+ PragueTime: u64(0),
+ OsakaTime: u64(0),
+ BPO1Time: u64(0),
+ BPO2Time: u64(0),
+ BPO3Time: u64(15_000),
+ DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
+ BlobScheduleConfig: ¶ms.BlobScheduleConfig{
+ Cancun: params.DefaultCancunBlobConfig,
+ Prague: params.DefaultPragueBlobConfig,
+ Osaka: params.DefaultOsakaBlobConfig,
+ BPO1: bpo1BlobConfig,
+ BPO2: bpo2BlobConfig,
+ BPO3: params.DefaultBPO3BlobConfig,
+ },
+ },
+ "BPO4": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ MergeNetsplitBlock: big.NewInt(0),
+ TerminalTotalDifficulty: big.NewInt(0),
+ ShanghaiTime: u64(0),
+ CancunTime: u64(0),
+ PragueTime: u64(0),
+ OsakaTime: u64(0),
+ BPO1Time: u64(0),
+ BPO2Time: u64(0),
+ BPO3Time: u64(0),
+ BPO4Time: u64(0),
+ DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
+ BlobScheduleConfig: ¶ms.BlobScheduleConfig{
+ Cancun: params.DefaultCancunBlobConfig,
+ Prague: params.DefaultPragueBlobConfig,
+ Osaka: params.DefaultOsakaBlobConfig,
+ BPO1: bpo1BlobConfig,
+ BPO2: bpo2BlobConfig,
+ BPO3: params.DefaultBPO3BlobConfig,
+ BPO4: params.DefaultBPO4BlobConfig,
+ },
+ },
+ "BPO3ToBPO4AtTime15k": {
+ ChainID: big.NewInt(1),
+ HomesteadBlock: big.NewInt(0),
+ EIP150Block: big.NewInt(0),
+ EIP155Block: big.NewInt(0),
+ EIP158Block: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ MergeNetsplitBlock: big.NewInt(0),
+ TerminalTotalDifficulty: big.NewInt(0),
+ ShanghaiTime: u64(0),
+ CancunTime: u64(0),
+ PragueTime: u64(0),
+ OsakaTime: u64(0),
+ BPO1Time: u64(0),
+ BPO2Time: u64(0),
+ BPO3Time: u64(0),
+ BPO4Time: u64(15_000),
+ DepositContractAddress: params.MainnetChainConfig.DepositContractAddress,
+ BlobScheduleConfig: ¶ms.BlobScheduleConfig{
+ Cancun: params.DefaultCancunBlobConfig,
+ Prague: params.DefaultPragueBlobConfig,
+ Osaka: params.DefaultOsakaBlobConfig,
+ BPO1: bpo1BlobConfig,
+ BPO2: bpo2BlobConfig,
+ BPO3: params.DefaultBPO3BlobConfig,
+ BPO4: params.DefaultBPO4BlobConfig,
+ },
+ },
+}
+
+var bpo1BlobConfig = ¶ms.BlobConfig{
+ Target: 9,
+ Max: 14,
+ UpdateFraction: 8832827,
+}
+
+var bpo2BlobConfig = ¶ms.BlobConfig{
+ Target: 14,
+ Max: 21,
+ UpdateFraction: 13739630,
}
// AvailableForks returns the set of defined fork names
diff --git a/trie/bintrie/binary_node.go b/trie/bintrie/binary_node.go
new file mode 100644
index 0000000000..1c003a6c8f
--- /dev/null
+++ b/trie/bintrie/binary_node.go
@@ -0,0 +1,133 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bintrie
+
+import (
+ "errors"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
type (
	// NodeFlushFn is invoked with a node's bit-path and the node itself when
	// the node is collected (flushed) out of the in-memory trie.
	NodeFlushFn func([]byte, BinaryNode)
	// NodeResolverFn loads the serialized node with the given bit-path and
	// hash from backing storage.
	NodeResolverFn func([]byte, common.Hash) ([]byte, error)
)
+
// zero is the zero value for a 32-byte array, used when hashing nil children.
var zero [32]byte

const (
	NodeWidth = 256 // Number of children per leaf node
	StemSize  = 31  // Number of bytes to travel before reaching a group of leaves
)

// Type tags used as the first byte of a serialized node.
const (
	nodeTypeStem     = iota + 1 // Stem node, contains a stem and a bitmap of values
	nodeTypeInternal            // Internal node, contains the hashes of its two children
)
+
// BinaryNode is an interface for a binary trie node.
type BinaryNode interface {
	// Get returns the value stored at the given 32-byte key, resolving
	// missing nodes via the resolver when needed.
	Get([]byte, NodeResolverFn) ([]byte, error)
	// Insert stores a value at the given key and returns the (possibly new)
	// subtree root; the int parameter is the node's depth in bits.
	Insert([]byte, []byte, NodeResolverFn, int) (BinaryNode, error)
	// Copy returns a deep copy of the node.
	Copy() BinaryNode
	// Hash returns the node's hash.
	Hash() common.Hash
	// GetValuesAtStem returns the group of NodeWidth values stored at the
	// given 31-byte stem.
	GetValuesAtStem([]byte, NodeResolverFn) ([][]byte, error)
	// InsertValuesAtStem stores a whole value group at the given stem.
	InsertValuesAtStem([]byte, [][]byte, NodeResolverFn, int) (BinaryNode, error)
	// CollectNodes walks the resolved subtree rooted here and hands each
	// node, with its bit-path, to the flush callback.
	CollectNodes([]byte, NodeFlushFn) error

	toDot(parent, path string) string
	GetHeight() int
}
+
+// SerializeNode serializes a binary trie node into a byte slice.
+func SerializeNode(node BinaryNode) []byte {
+ switch n := (node).(type) {
+ case *InternalNode:
+ var serialized [65]byte
+ serialized[0] = nodeTypeInternal
+ copy(serialized[1:33], n.left.Hash().Bytes())
+ copy(serialized[33:65], n.right.Hash().Bytes())
+ return serialized[:]
+ case *StemNode:
+ var serialized [32 + 32 + 256*32]byte
+ serialized[0] = nodeTypeStem
+ copy(serialized[1:32], node.(*StemNode).Stem)
+ bitmap := serialized[32:64]
+ offset := 64
+ for i, v := range node.(*StemNode).Values {
+ if v != nil {
+ bitmap[i/8] |= 1 << (7 - (i % 8))
+ copy(serialized[offset:offset+32], v)
+ offset += 32
+ }
+ }
+ return serialized[:]
+ default:
+ panic("invalid node type")
+ }
+}
+
// invalidSerializedLength is returned when a serialized node's length does not
// match what its type byte (and, for stems, its value bitmap) require.
var invalidSerializedLength = errors.New("invalid serialized node length")

// DeserializeNode deserializes a binary trie node from a byte slice.
// An empty input decodes to the Empty node; otherwise the layout is the
// inverse of SerializeNode: a type byte followed by either two 32-byte child
// hashes (internal node) or a 31-byte stem, a 256-bit presence bitmap and the
// present 32-byte values (stem node).
func DeserializeNode(serialized []byte, depth int) (BinaryNode, error) {
	if len(serialized) == 0 {
		return Empty{}, nil
	}

	switch serialized[0] {
	case nodeTypeInternal:
		if len(serialized) != 65 {
			return nil, invalidSerializedLength
		}
		// Children are left as HashedNode and resolved lazily on access.
		return &InternalNode{
			depth: depth,
			left:  HashedNode(common.BytesToHash(serialized[1:33])),
			right: HashedNode(common.BytesToHash(serialized[33:65])),
		}, nil
	case nodeTypeStem:
		if len(serialized) < 64 {
			return nil, invalidSerializedLength
		}
		var values [256][]byte
		bitmap := serialized[32:64]
		offset := 64

		for i := range 256 {
			if bitmap[i/8]>>(7-(i%8))&1 == 1 {
				if len(serialized) < offset+32 {
					return nil, invalidSerializedLength
				}
				// NOTE: values (and the stem below) alias the input buffer;
				// callers must not mutate serialized after this call.
				values[i] = serialized[offset : offset+32]
				offset += 32
			}
		}
		return &StemNode{
			Stem:   serialized[1:32],
			Values: values[:],
			depth:  depth,
		}, nil
	default:
		return nil, errors.New("invalid node type")
	}
}
+
+// ToDot converts the binary trie to a DOT language representation. Useful for debugging.
+func ToDot(root BinaryNode) string {
+ return root.toDot("", "")
+}
diff --git a/trie/bintrie/binary_node_test.go b/trie/bintrie/binary_node_test.go
new file mode 100644
index 0000000000..b21daaab69
--- /dev/null
+++ b/trie/bintrie/binary_node_test.go
@@ -0,0 +1,252 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bintrie
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
// TestSerializeDeserializeInternalNode tests serialization and deserialization of InternalNode
func TestSerializeDeserializeInternalNode(t *testing.T) {
	// Create an internal node with two hashed children
	leftHash := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")
	rightHash := common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321")

	node := &InternalNode{
		depth: 5,
		left:  HashedNode(leftHash),
		right: HashedNode(rightHash),
	}

	// Serialize the node
	serialized := SerializeNode(node)

	// Check the serialized format
	if serialized[0] != nodeTypeInternal {
		t.Errorf("Expected type byte to be %d, got %d", nodeTypeInternal, serialized[0])
	}

	// 65 = 1 type byte + 2 x 32-byte child hashes
	if len(serialized) != 65 {
		t.Errorf("Expected serialized length to be 65, got %d", len(serialized))
	}

	// Deserialize the node
	deserialized, err := DeserializeNode(serialized, 5)
	if err != nil {
		t.Fatalf("Failed to deserialize node: %v", err)
	}

	// Check that it's an internal node
	internalNode, ok := deserialized.(*InternalNode)
	if !ok {
		t.Fatalf("Expected InternalNode, got %T", deserialized)
	}

	// Check the depth
	if internalNode.depth != 5 {
		t.Errorf("Expected depth 5, got %d", internalNode.depth)
	}

	// Check the left and right hashes
	if internalNode.left.Hash() != leftHash {
		t.Errorf("Left hash mismatch: expected %x, got %x", leftHash, internalNode.left.Hash())
	}

	if internalNode.right.Hash() != rightHash {
		t.Errorf("Right hash mismatch: expected %x, got %x", rightHash, internalNode.right.Hash())
	}
}
+
// TestSerializeDeserializeStemNode tests serialization and deserialization of StemNode
func TestSerializeDeserializeStemNode(t *testing.T) {
	// Create a stem node with some values
	stem := make([]byte, 31)
	for i := range stem {
		stem[i] = byte(i)
	}

	var values [256][]byte
	// Add some values at different indices (first, middle, and last slot)
	values[0] = common.HexToHash("0x0101010101010101010101010101010101010101010101010101010101010101").Bytes()
	values[10] = common.HexToHash("0x0202020202020202020202020202020202020202020202020202020202020202").Bytes()
	values[255] = common.HexToHash("0x0303030303030303030303030303030303030303030303030303030303030303").Bytes()

	node := &StemNode{
		Stem:   stem,
		Values: values[:],
		depth:  10,
	}

	// Serialize the node
	serialized := SerializeNode(node)

	// Check the serialized format
	if serialized[0] != nodeTypeStem {
		t.Errorf("Expected type byte to be %d, got %d", nodeTypeStem, serialized[0])
	}

	// Check the stem is correctly serialized (bytes 1..31 hold the stem)
	if !bytes.Equal(serialized[1:32], stem) {
		t.Errorf("Stem mismatch in serialized data")
	}

	// Deserialize the node
	deserialized, err := DeserializeNode(serialized, 10)
	if err != nil {
		t.Fatalf("Failed to deserialize node: %v", err)
	}

	// Check that it's a stem node
	stemNode, ok := deserialized.(*StemNode)
	if !ok {
		t.Fatalf("Expected StemNode, got %T", deserialized)
	}

	// Check the stem
	if !bytes.Equal(stemNode.Stem, stem) {
		t.Errorf("Stem mismatch after deserialization")
	}

	// Check the values round-tripped
	if !bytes.Equal(stemNode.Values[0], values[0]) {
		t.Errorf("Value at index 0 mismatch")
	}
	if !bytes.Equal(stemNode.Values[10], values[10]) {
		t.Errorf("Value at index 10 mismatch")
	}
	if !bytes.Equal(stemNode.Values[255], values[255]) {
		t.Errorf("Value at index 255 mismatch")
	}

	// Check that other values are nil
	for i := range NodeWidth {
		if i == 0 || i == 10 || i == 255 {
			continue
		}
		if stemNode.Values[i] != nil {
			t.Errorf("Expected nil value at index %d, got %x", i, stemNode.Values[i])
		}
	}
}
+
// TestDeserializeEmptyNode tests deserialization of empty node
func TestDeserializeEmptyNode(t *testing.T) {
	// Empty byte slice should deserialize to Empty node
	deserialized, err := DeserializeNode([]byte{}, 0)
	if err != nil {
		t.Fatalf("Failed to deserialize empty node: %v", err)
	}

	_, ok := deserialized.(Empty)
	if !ok {
		t.Fatalf("Expected Empty node, got %T", deserialized)
	}
}
+
// TestDeserializeInvalidType tests deserialization with invalid type byte
func TestDeserializeInvalidType(t *testing.T) {
	// Create invalid serialized data with unknown type byte
	invalidData := []byte{99, 0, 0, 0} // Type byte 99 is neither stem nor internal

	_, err := DeserializeNode(invalidData, 0)
	if err == nil {
		t.Fatal("Expected error for invalid type byte, got nil")
	}
}
+
// TestDeserializeInvalidLength tests deserialization with invalid data length
func TestDeserializeInvalidLength(t *testing.T) {
	// InternalNode with the right type byte but wrong length (must be 65 bytes)
	invalidData := []byte{nodeTypeInternal, 0, 0} // Too short for internal node

	_, err := DeserializeNode(invalidData, 0)
	if err == nil {
		t.Fatal("Expected error for invalid data length, got nil")
	}

	if err.Error() != "invalid serialized node length" {
		t.Errorf("Expected 'invalid serialized node length' error, got: %v", err)
	}
}
+
// TestKeyToPath tests the keyToPath function. Note that keyToPath expands
// depth+1 bits, so the expected path is always one longer than depth.
func TestKeyToPath(t *testing.T) {
	tests := []struct {
		name     string
		depth    int
		key      []byte
		expected []byte
		wantErr  bool
	}{
		{
			name:     "depth 0",
			depth:    0,
			key:      []byte{0x80}, // 10000000 in binary
			expected: []byte{1},
			wantErr:  false,
		},
		{
			name:     "depth 7",
			depth:    7,
			key:      []byte{0xFF}, // 11111111 in binary
			expected: []byte{1, 1, 1, 1, 1, 1, 1, 1},
			wantErr:  false,
		},
		{
			name:     "depth crossing byte boundary",
			depth:    10,
			key:      []byte{0xFF, 0x00}, // 11111111 00000000 in binary
			expected: []byte{1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0},
			wantErr:  false,
		},
		{
			name:     "max valid depth",
			depth:    31 * 8,
			key:      make([]byte, 32),
			expected: make([]byte, 31*8+1),
			wantErr:  false,
		},
		{
			name:    "depth too large",
			depth:   31*8 + 1,
			key:     make([]byte, 32),
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			path, err := keyToPath(tt.depth, tt.key)
			if tt.wantErr {
				if err == nil {
					t.Errorf("Expected error for depth %d, got nil", tt.depth)
				}
				return
			}
			if err != nil {
				t.Errorf("Unexpected error: %v", err)
				return
			}
			if !bytes.Equal(path, tt.expected) {
				t.Errorf("Path mismatch: expected %v, got %v", tt.expected, path)
			}
		})
	}
}
diff --git a/trie/bintrie/empty.go b/trie/bintrie/empty.go
new file mode 100644
index 0000000000..7cfe373b35
--- /dev/null
+++ b/trie/bintrie/empty.go
@@ -0,0 +1,72 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bintrie
+
+import (
+ "slices"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+type Empty struct{}
+
// Get always returns nil: an empty node stores no values.
func (e Empty) Get(_ []byte, _ NodeResolverFn) ([]byte, error) {
	return nil, nil
}
+
+func (e Empty) Insert(key []byte, value []byte, _ NodeResolverFn, depth int) (BinaryNode, error) {
+ var values [256][]byte
+ values[key[31]] = value
+ return &StemNode{
+ Stem: slices.Clone(key[:31]),
+ Values: values[:],
+ depth: depth,
+ }, nil
+}
+
// Copy returns a fresh empty node; Empty carries no state to duplicate.
func (e Empty) Copy() BinaryNode {
	return Empty{}
}
+
// Hash of an empty node is the all-zero hash.
func (e Empty) Hash() common.Hash {
	return common.Hash{}
}
+
+func (e Empty) GetValuesAtStem(_ []byte, _ NodeResolverFn) ([][]byte, error) {
+ var values [256][]byte
+ return values[:], nil
+}
+
// InsertValuesAtStem replaces the empty node with a stem node holding the
// given value group. The stem is cloned, but the values slice is stored
// as-is (no copy), so the caller must not mutate it afterwards.
func (e Empty) InsertValuesAtStem(key []byte, values [][]byte, _ NodeResolverFn, depth int) (BinaryNode, error) {
	return &StemNode{
		Stem:   slices.Clone(key[:31]),
		Values: values,
		depth:  depth,
	}, nil
}
+
// CollectNodes does nothing: an empty node has nothing to flush.
func (e Empty) CollectNodes(_ []byte, _ NodeFlushFn) error {
	return nil
}
+
// toDot renders nothing for an empty node.
func (e Empty) toDot(parent string, path string) string {
	return ""
}
+
// GetHeight of an empty node is 0.
func (e Empty) GetHeight() int {
	return 0
}
diff --git a/trie/bintrie/empty_test.go b/trie/bintrie/empty_test.go
new file mode 100644
index 0000000000..574ae1830b
--- /dev/null
+++ b/trie/bintrie/empty_test.go
@@ -0,0 +1,222 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bintrie
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
// TestEmptyGet tests the Get method: an empty node never holds a value.
func TestEmptyGet(t *testing.T) {
	node := Empty{}

	key := make([]byte, 32)
	value, err := node.Get(key, nil)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	if value != nil {
		t.Errorf("Expected nil value from empty node, got %x", value)
	}
}
+
// TestEmptyInsert tests the Insert method: inserting into an empty node
// must produce a stem node with the value in the slot picked by key[31].
func TestEmptyInsert(t *testing.T) {
	node := Empty{}

	key := make([]byte, 32)
	key[0] = 0x12
	key[31] = 0x34
	value := common.HexToHash("0xabcd").Bytes()

	newNode, err := node.Insert(key, value, nil, 0)
	if err != nil {
		t.Fatalf("Failed to insert: %v", err)
	}

	// Should create a StemNode
	stemNode, ok := newNode.(*StemNode)
	if !ok {
		t.Fatalf("Expected StemNode, got %T", newNode)
	}

	// Check the stem (first 31 bytes of key)
	if !bytes.Equal(stemNode.Stem, key[:31]) {
		t.Errorf("Stem mismatch: expected %x, got %x", key[:31], stemNode.Stem)
	}

	// Check the value at the correct index (last byte of key)
	if !bytes.Equal(stemNode.Values[key[31]], value) {
		t.Errorf("Value mismatch at index %d: expected %x, got %x", key[31], value, stemNode.Values[key[31]])
	}

	// Check that other values are nil
	for i := 0; i < 256; i++ {
		if i != int(key[31]) && stemNode.Values[i] != nil {
			t.Errorf("Expected nil value at index %d, got %x", i, stemNode.Values[i])
		}
	}
}
+
// TestEmptyCopy tests the Copy method
func TestEmptyCopy(t *testing.T) {
	node := Empty{}

	copied := node.Copy()
	copiedEmpty, ok := copied.(Empty)
	if !ok {
		t.Fatalf("Expected Empty, got %T", copied)
	}

	// Both should be empty
	if node != copiedEmpty {
		// Empty is a zero-value struct, so copies should be equal
		t.Errorf("Empty nodes should be equal")
	}
}
+
// TestEmptyHash tests the Hash method
func TestEmptyHash(t *testing.T) {
	node := Empty{}

	hash := node.Hash()

	// Empty node should have the all-zero hash
	if hash != (common.Hash{}) {
		t.Errorf("Expected zero hash for empty node, got %x", hash)
	}
}
+
// TestEmptyGetValuesAtStem tests the GetValuesAtStem method
func TestEmptyGetValuesAtStem(t *testing.T) {
	node := Empty{}

	stem := make([]byte, 31)
	values, err := node.GetValuesAtStem(stem, nil)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Should return an array of 256 nil values (one per leaf slot)
	if len(values) != 256 {
		t.Errorf("Expected 256 values, got %d", len(values))
	}

	for i, v := range values {
		if v != nil {
			t.Errorf("Expected nil value at index %d, got %x", i, v)
		}
	}
}
+
// TestEmptyInsertValuesAtStem tests the InsertValuesAtStem method
func TestEmptyInsertValuesAtStem(t *testing.T) {
	node := Empty{}

	stem := make([]byte, 31)
	stem[0] = 0x42

	var values [256][]byte
	values[0] = common.HexToHash("0x0101").Bytes()
	values[10] = common.HexToHash("0x0202").Bytes()
	values[255] = common.HexToHash("0x0303").Bytes()

	newNode, err := node.InsertValuesAtStem(stem, values[:], nil, 5)
	if err != nil {
		t.Fatalf("Failed to insert values: %v", err)
	}

	// Should create a StemNode
	stemNode, ok := newNode.(*StemNode)
	if !ok {
		t.Fatalf("Expected StemNode, got %T", newNode)
	}

	// Check the stem
	if !bytes.Equal(stemNode.Stem, stem) {
		t.Errorf("Stem mismatch: expected %x, got %x", stem, stemNode.Stem)
	}

	// Check the depth
	if stemNode.depth != 5 {
		t.Errorf("Depth mismatch: expected 5, got %d", stemNode.depth)
	}

	// Check the values
	if !bytes.Equal(stemNode.Values[0], values[0]) {
		t.Error("Value at index 0 mismatch")
	}
	if !bytes.Equal(stemNode.Values[10], values[10]) {
		t.Error("Value at index 10 mismatch")
	}
	if !bytes.Equal(stemNode.Values[255], values[255]) {
		t.Error("Value at index 255 mismatch")
	}

	// Check that values is the same slice (not a copy): InsertValuesAtStem
	// documents that it takes ownership of the caller's slice.
	if &stemNode.Values[0] != &values[0] {
		t.Error("Expected values to be the same slice reference")
	}
}
+
// TestEmptyCollectNodes tests the CollectNodes method
func TestEmptyCollectNodes(t *testing.T) {
	node := Empty{}

	var collected []BinaryNode
	flushFn := func(path []byte, n BinaryNode) {
		collected = append(collected, n)
	}

	err := node.CollectNodes([]byte{0, 1, 0}, flushFn)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	// Should not collect anything for empty node
	if len(collected) != 0 {
		t.Errorf("Expected no collected nodes for empty, got %d", len(collected))
	}
}
+
// TestEmptyToDot tests the toDot method
func TestEmptyToDot(t *testing.T) {
	node := Empty{}

	dot := node.toDot("parent", "010")

	// Should return empty string for empty node (nothing to render)
	if dot != "" {
		t.Errorf("Expected empty string for empty node toDot, got %s", dot)
	}
}
+
// TestEmptyGetHeight tests the GetHeight method
func TestEmptyGetHeight(t *testing.T) {
	node := Empty{}

	height := node.GetHeight()

	// Empty node should have height 0
	if height != 0 {
		t.Errorf("Expected height 0 for empty node, got %d", height)
	}
}
diff --git a/trie/bintrie/hashed_node.go b/trie/bintrie/hashed_node.go
new file mode 100644
index 0000000000..8f9fd66a59
--- /dev/null
+++ b/trie/bintrie/hashed_node.go
@@ -0,0 +1,66 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bintrie
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+type HashedNode common.Hash
+
+func (h HashedNode) Get(_ []byte, _ NodeResolverFn) ([]byte, error) {
+ panic("not implemented") // TODO: Implement
+}
+
// Insert is not supported: a hashed node must be resolved before mutation.
func (h HashedNode) Insert(key []byte, value []byte, resolver NodeResolverFn, depth int) (BinaryNode, error) {
	return nil, errors.New("insert not implemented for hashed node")
}
+
+func (h HashedNode) Copy() BinaryNode {
+ nh := common.Hash(h)
+ return HashedNode(nh)
+}
+
// Hash returns the stored hash; no computation is needed.
func (h HashedNode) Hash() common.Hash {
	return common.Hash(h)
}
+
// GetValuesAtStem is not supported: values can only be read after the node
// has been resolved from storage.
func (h HashedNode) GetValuesAtStem(_ []byte, _ NodeResolverFn) ([][]byte, error) {
	return nil, errors.New("attempted to get values from an unresolved node")
}
+
// InsertValuesAtStem is not supported: a hashed node must be resolved first.
func (h HashedNode) InsertValuesAtStem(key []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) {
	return nil, errors.New("insertValuesAtStem not implemented for hashed node")
}
+
+func (h HashedNode) toDot(parent string, path string) string {
+ me := fmt.Sprintf("hash%s", path)
+ ret := fmt.Sprintf("%s [label=\"%x\"]\n", me, h)
+ ret = fmt.Sprintf("%s %s -> %s\n", ret, parent, me)
+ return ret
+}
+
// CollectNodes is not supported: an unresolved node has no children to walk.
func (h HashedNode) CollectNodes([]byte, NodeFlushFn) error {
	return errors.New("collectNodes not implemented for hashed node")
}
+
// GetHeight panics: height queries must only be made on resolved subtrees,
// so reaching a HashedNode here indicates a programming error.
func (h HashedNode) GetHeight() int {
	panic("tried to get the height of a hashed node, this is a bug")
}
diff --git a/trie/bintrie/hashed_node_test.go b/trie/bintrie/hashed_node_test.go
new file mode 100644
index 0000000000..0c19ae0c57
--- /dev/null
+++ b/trie/bintrie/hashed_node_test.go
@@ -0,0 +1,128 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bintrie
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
// TestHashedNodeHash tests the Hash method
func TestHashedNodeHash(t *testing.T) {
	hash := common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")
	node := HashedNode(hash)

	// Hash should return the stored hash unchanged
	if node.Hash() != hash {
		t.Errorf("Hash mismatch: expected %x, got %x", hash, node.Hash())
	}
}
+
// TestHashedNodeCopy tests the Copy method
func TestHashedNodeCopy(t *testing.T) {
	hash := common.HexToHash("0xabcdef")
	node := HashedNode(hash)

	copied := node.Copy()
	copiedHash, ok := copied.(HashedNode)
	if !ok {
		t.Fatalf("Expected HashedNode, got %T", copied)
	}

	// Hash should be the same
	if common.Hash(copiedHash) != hash {
		t.Errorf("Hash mismatch after copy: expected %x, got %x", hash, copiedHash)
	}

	// But should be a different object
	// NOTE(review): this compares the addresses of two distinct local
	// variables, which can never be equal — the check is vacuous.
	if &node == &copiedHash {
		t.Error("Copy returned same object reference")
	}
}
+
// TestHashedNodeInsert tests that Insert returns an error
func TestHashedNodeInsert(t *testing.T) {
	node := HashedNode(common.HexToHash("0x1234"))

	key := make([]byte, 32)
	value := make([]byte, 32)

	_, err := node.Insert(key, value, nil, 0)
	if err == nil {
		t.Fatal("Expected error for Insert on HashedNode")
	}

	// The exact message is part of the method's contract here
	if err.Error() != "insert not implemented for hashed node" {
		t.Errorf("Unexpected error message: %v", err)
	}
}
+
// TestHashedNodeGetValuesAtStem tests that GetValuesAtStem returns an error
func TestHashedNodeGetValuesAtStem(t *testing.T) {
	node := HashedNode(common.HexToHash("0x1234"))

	stem := make([]byte, 31)
	_, err := node.GetValuesAtStem(stem, nil)
	if err == nil {
		t.Fatal("Expected error for GetValuesAtStem on HashedNode")
	}

	if err.Error() != "attempted to get values from an unresolved node" {
		t.Errorf("Unexpected error message: %v", err)
	}
}
+
// TestHashedNodeInsertValuesAtStem tests that InsertValuesAtStem returns an error
func TestHashedNodeInsertValuesAtStem(t *testing.T) {
	node := HashedNode(common.HexToHash("0x1234"))

	stem := make([]byte, 31)
	values := make([][]byte, 256)

	_, err := node.InsertValuesAtStem(stem, values, nil, 0)
	if err == nil {
		t.Fatal("Expected error for InsertValuesAtStem on HashedNode")
	}

	if err.Error() != "insertValuesAtStem not implemented for hashed node" {
		t.Errorf("Unexpected error message: %v", err)
	}
}
+
// TestHashedNodeToDot tests the toDot method for visualization.
// It relies on the contains helper below to search the DOT output.
func TestHashedNodeToDot(t *testing.T) {
	hash := common.HexToHash("0x1234")
	node := HashedNode(hash)

	dot := node.toDot("parent", "010")

	// Should contain the vertex name derived from the path
	expectedHash := "hash010"
	if !contains(dot, expectedHash) {
		t.Errorf("Expected dot output to contain %s", expectedHash)
	}

	// And the edge from the parent to this vertex
	if !contains(dot, "parent -> hash010") {
		t.Error("Expected dot output to contain parent connection")
	}
}
+
// contains reports whether substr occurs within s. The previous
// implementation only compared lengths and never searched s, making every
// check with a non-empty substring vacuously true. Implemented by hand to
// avoid adding a "strings" import to this file.
func contains(s, substr string) bool {
	if len(substr) == 0 {
		return true
	}
	for i := 0; i+len(substr) <= len(s); i++ {
		if s[i:i+len(substr)] == substr {
			return true
		}
	}
	return false
}
diff --git a/trie/bintrie/internal_node.go b/trie/bintrie/internal_node.go
new file mode 100644
index 0000000000..f3ddd1aab0
--- /dev/null
+++ b/trie/bintrie/internal_node.go
@@ -0,0 +1,189 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bintrie
+
+import (
+ "crypto/sha256"
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
// keyToPath expands the first depth+1 bits of key into a slice of single-bit
// bytes (each element 0 or 1), most significant bit first. It errors when
// depth exceeds the number of bits in a 31-byte stem.
func keyToPath(depth int, key []byte) ([]byte, error) {
	if depth > 31*8 {
		return nil, errors.New("node too deep")
	}
	bits := make([]byte, depth+1)
	for i := range bits {
		bits[i] = (key[i/8] >> (7 - i%8)) & 1
	}
	return bits, nil
}
+
// InternalNode is a binary trie internal node. The bit of the key at the
// node's depth selects the left (0) or right (1) child.
type InternalNode struct {
	left, right BinaryNode
	depth       int // depth of this node in bits from the root
}
+
// GetValuesAtStem retrieves the group of values located at the given stem key.
// If the selected child is still a HashedNode, it is resolved via the
// resolver, deserialized, and memoized in place before recursing.
func (bt *InternalNode) GetValuesAtStem(stem []byte, resolver NodeResolverFn) ([][]byte, error) {
	if bt.depth > 31*8 {
		return nil, errors.New("node too deep")
	}

	// The bit of the stem at this node's depth picks the child to descend into.
	bit := stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1
	var child *BinaryNode
	if bit == 0 {
		child = &bt.left
	} else {
		child = &bt.right
	}

	if hn, ok := (*child).(HashedNode); ok {
		path, err := keyToPath(bt.depth, stem)
		if err != nil {
			return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err)
		}
		data, err := resolver(path, common.Hash(hn))
		if err != nil {
			return nil, fmt.Errorf("GetValuesAtStem resolve error: %w", err)
		}
		node, err := DeserializeNode(data, bt.depth+1)
		if err != nil {
			return nil, fmt.Errorf("GetValuesAtStem node deserialization error: %w", err)
		}
		// Replace the hashed placeholder with the resolved node so the
		// resolution is only ever done once.
		*child = node
	}
	return (*child).GetValuesAtStem(stem, resolver)
}
+
+// Get retrieves the value for the given key.
+func (bt *InternalNode) Get(key []byte, resolver NodeResolverFn) ([]byte, error) {
+ values, err := bt.GetValuesAtStem(key[:31], resolver)
+ if err != nil {
+ return nil, fmt.Errorf("get error: %w", err)
+ }
+ return values[key[31]], nil
+}
+
+// Insert inserts a new key-value pair into the trie.
+func (bt *InternalNode) Insert(key []byte, value []byte, resolver NodeResolverFn, depth int) (BinaryNode, error) {
+ var values [256][]byte
+ values[key[31]] = value
+ return bt.InsertValuesAtStem(key[:31], values[:], resolver, depth)
+}
+
+// Copy creates a deep copy of the node.
+func (bt *InternalNode) Copy() BinaryNode {
+ return &InternalNode{
+ left: bt.left.Copy(),
+ right: bt.right.Copy(),
+ depth: bt.depth,
+ }
+}
+
+// Hash returns the hash of the node.
+func (bt *InternalNode) Hash() common.Hash {
+ h := sha256.New()
+ if bt.left != nil {
+ h.Write(bt.left.Hash().Bytes())
+ } else {
+ h.Write(zero[:])
+ }
+ if bt.right != nil {
+ h.Write(bt.right.Hash().Bytes())
+ } else {
+ h.Write(zero[:])
+ }
+ return common.BytesToHash(h.Sum(nil))
+}
+
// InsertValuesAtStem inserts a full value group at the given stem in the internal node.
// Already-existing values will be overwritten.
//
// NOTE(review): unlike GetValuesAtStem, this method does not resolve a
// HashedNode child first, so inserting below an unresolved child surfaces
// the child's "not implemented" error — confirm this is intended.
func (bt *InternalNode) InsertValuesAtStem(stem []byte, values [][]byte, resolver NodeResolverFn, depth int) (BinaryNode, error) {
	var (
		child *BinaryNode
		err   error
	)
	// The stem bit at this node's depth selects the subtree to insert into.
	bit := stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1
	if bit == 0 {
		child = &bt.left
	} else {
		child = &bt.right
	}
	*child, err = (*child).InsertValuesAtStem(stem, values, resolver, depth+1)
	return bt, err
}
+
// CollectNodes collects all child nodes at a given path, and flushes it
// into the provided node collector. Children are flushed before the node
// itself (post-order), so a store sees leaves first.
func (bt *InternalNode) CollectNodes(path []byte, flushfn NodeFlushFn) error {
	if bt.left != nil {
		// Copy the path into a fresh backing array so appending the child's
		// bit cannot alias or clobber the caller's slice (or the sibling's).
		var p [256]byte
		copy(p[:], path)
		childpath := p[:len(path)]
		childpath = append(childpath, 0)
		if err := bt.left.CollectNodes(childpath, flushfn); err != nil {
			return err
		}
	}
	if bt.right != nil {
		var p [256]byte
		copy(p[:], path)
		childpath := p[:len(path)]
		childpath = append(childpath, 1)
		if err := bt.right.CollectNodes(childpath, flushfn); err != nil {
			return err
		}
	}
	flushfn(path, bt)
	return nil
}
+
+// GetHeight returns the height of the node.
+func (bt *InternalNode) GetHeight() int {
+ var (
+ leftHeight int
+ rightHeight int
+ )
+ if bt.left != nil {
+ leftHeight = bt.left.GetHeight()
+ }
+ if bt.right != nil {
+ rightHeight = bt.right.GetHeight()
+ }
+ return 1 + max(leftHeight, rightHeight)
+}
+
// toDot renders this node and its subtrees as DOT vertices and edges; the
// path string (one hex pair per level) keeps vertex names unique.
func (bt *InternalNode) toDot(parent, path string) string {
	me := fmt.Sprintf("internal%s", path)
	ret := fmt.Sprintf("%s [label=\"I: %x\"]\n", me, bt.Hash())
	// The root has no parent, so no incoming edge is emitted for it.
	if len(parent) > 0 {
		ret = fmt.Sprintf("%s %s -> %s\n", ret, parent, me)
	}

	if bt.left != nil {
		ret = fmt.Sprintf("%s%s", ret, bt.left.toDot(me, fmt.Sprintf("%s%02x", path, 0)))
	}
	if bt.right != nil {
		ret = fmt.Sprintf("%s%s", ret, bt.right.toDot(me, fmt.Sprintf("%s%02x", path, 1)))
	}
	return ret
}
diff --git a/trie/bintrie/internal_node_test.go b/trie/bintrie/internal_node_test.go
new file mode 100644
index 0000000000..158d8b7147
--- /dev/null
+++ b/trie/bintrie/internal_node_test.go
@@ -0,0 +1,458 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package bintrie
+
+import (
+ "bytes"
+ "errors"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// TestInternalNodeGet tests the Get method
+func TestInternalNodeGet(t *testing.T) {
+	// Create a simple tree structure. The first bit of the stem picks the
+	// child (0 = left, 1 = right); the last key byte picks the value slot.
+	leftStem := make([]byte, 31)
+	rightStem := make([]byte, 31)
+	rightStem[0] = 0x80 // First bit is 1
+
+	var leftValues, rightValues [256][]byte
+	leftValues[0] = common.HexToHash("0x0101").Bytes()
+	rightValues[0] = common.HexToHash("0x0202").Bytes()
+
+	node := &InternalNode{
+		depth: 0,
+		left: &StemNode{
+			Stem:   leftStem,
+			Values: leftValues[:],
+			depth:  1,
+		},
+		right: &StemNode{
+			Stem:   rightStem,
+			Values: rightValues[:],
+			depth:  1,
+		},
+	}
+
+	// Get value from left subtree
+	leftKey := make([]byte, 32)
+	leftKey[31] = 0
+	value, err := node.Get(leftKey, nil)
+	if err != nil {
+		t.Fatalf("Failed to get left value: %v", err)
+	}
+	if !bytes.Equal(value, leftValues[0]) {
+		t.Errorf("Left value mismatch: expected %x, got %x", leftValues[0], value)
+	}
+
+	// Get value from right subtree
+	rightKey := make([]byte, 32)
+	rightKey[0] = 0x80
+	rightKey[31] = 0
+	value, err = node.Get(rightKey, nil)
+	if err != nil {
+		t.Fatalf("Failed to get right value: %v", err)
+	}
+	if !bytes.Equal(value, rightValues[0]) {
+		t.Errorf("Right value mismatch: expected %x, got %x", rightValues[0], value)
+	}
+}
+
+// TestInternalNodeGetWithResolver tests Get with HashedNode resolution
+func TestInternalNodeGetWithResolver(t *testing.T) {
+	// Create an internal node with a hashed child
+	hashedChild := HashedNode(common.HexToHash("0x1234"))
+
+	node := &InternalNode{
+		depth: 0,
+		left:  hashedChild,
+		right: Empty{},
+	}
+
+	// Mock resolver that returns a stem node; only the known hash
+	// resolves, anything else reports a miss.
+	resolver := func(path []byte, hash common.Hash) ([]byte, error) {
+		if hash == common.Hash(hashedChild) {
+			stem := make([]byte, 31)
+			var values [256][]byte
+			values[5] = common.HexToHash("0xabcd").Bytes()
+			stemNode := &StemNode{
+				Stem:   stem,
+				Values: values[:],
+				depth:  1,
+			}
+			return SerializeNode(stemNode), nil
+		}
+		return nil, errors.New("node not found")
+	}
+
+	// Get value through the hashed node; the trie must call the resolver
+	// to materialize the child before descending into it.
+	key := make([]byte, 32)
+	key[31] = 5
+	value, err := node.Get(key, resolver)
+	if err != nil {
+		t.Fatalf("Failed to get value: %v", err)
+	}
+
+	expectedValue := common.HexToHash("0xabcd").Bytes()
+	if !bytes.Equal(value, expectedValue) {
+		t.Errorf("Value mismatch: expected %x, got %x", expectedValue, value)
+	}
+}
+
+// TestInternalNodeInsert tests the Insert method
+func TestInternalNodeInsert(t *testing.T) {
+	// Start with an internal node with empty children
+	node := &InternalNode{
+		depth: 0,
+		left:  Empty{},
+		right: Empty{},
+	}
+
+	// Insert a value into the left subtree (key's first bit is 0).
+	leftKey := make([]byte, 32)
+	leftKey[31] = 10
+	leftValue := common.HexToHash("0x0101").Bytes()
+
+	newNode, err := node.Insert(leftKey, leftValue, nil, 0)
+	if err != nil {
+		t.Fatalf("Failed to insert: %v", err)
+	}
+
+	internalNode, ok := newNode.(*InternalNode)
+	if !ok {
+		t.Fatalf("Expected InternalNode, got %T", newNode)
+	}
+
+	// Check that the Empty left child was replaced by a StemNode
+	leftStem, ok := internalNode.left.(*StemNode)
+	if !ok {
+		t.Fatalf("Expected left child to be StemNode, got %T", internalNode.left)
+	}
+
+	// Check the inserted value
+	if !bytes.Equal(leftStem.Values[10], leftValue) {
+		t.Errorf("Value mismatch: expected %x, got %x", leftValue, leftStem.Values[10])
+	}
+
+	// Right child should still be Empty
+	_, ok = internalNode.right.(Empty)
+	if !ok {
+		t.Errorf("Expected right child to remain Empty, got %T", internalNode.right)
+	}
+}
+
+// TestInternalNodeCopy tests the Copy method
+func TestInternalNodeCopy(t *testing.T) {
+	// Create an internal node with stem children
+	leftStem := &StemNode{
+		Stem:   make([]byte, 31),
+		Values: make([][]byte, 256),
+		depth:  1,
+	}
+	leftStem.Values[0] = common.HexToHash("0x0101").Bytes()
+
+	rightStem := &StemNode{
+		Stem:   make([]byte, 31),
+		Values: make([][]byte, 256),
+		depth:  1,
+	}
+	rightStem.Stem[0] = 0x80
+	rightStem.Values[0] = common.HexToHash("0x0202").Bytes()
+
+	node := &InternalNode{
+		depth: 0,
+		left:  leftStem,
+		right: rightStem,
+	}
+
+	// Create a copy
+	copied := node.Copy()
+	copiedInternal, ok := copied.(*InternalNode)
+	if !ok {
+		t.Fatalf("Expected InternalNode, got %T", copied)
+	}
+
+	// Check depth
+	if copiedInternal.depth != node.depth {
+		t.Errorf("Depth mismatch: expected %d, got %d", node.depth, copiedInternal.depth)
+	}
+
+	// Check that children are copied
+	copiedLeft, ok := copiedInternal.left.(*StemNode)
+	if !ok {
+		t.Fatalf("Expected left child to be StemNode, got %T", copiedInternal.left)
+	}
+
+	copiedRight, ok := copiedInternal.right.(*StemNode)
+	if !ok {
+		t.Fatalf("Expected right child to be StemNode, got %T", copiedInternal.right)
+	}
+
+	// Verify deep copy: the pointer comparison proves the children are
+	// distinct objects, not shared with the original.
+	if copiedLeft == leftStem {
+		t.Error("Left child not properly copied")
+	}
+	if copiedRight == rightStem {
+		t.Error("Right child not properly copied")
+	}
+
+	// But values should be equal
+	if !bytes.Equal(copiedLeft.Values[0], leftStem.Values[0]) {
+		t.Error("Left child value mismatch after copy")
+	}
+	if !bytes.Equal(copiedRight.Values[0], rightStem.Values[0]) {
+		t.Error("Right child value mismatch after copy")
+	}
+}
+
+// TestInternalNodeHash tests the Hash method
+func TestInternalNodeHash(t *testing.T) {
+	// Create an internal node with two pre-hashed children so the hash
+	// input is fully deterministic.
+	node := &InternalNode{
+		depth: 0,
+		left:  HashedNode(common.HexToHash("0x1111")),
+		right: HashedNode(common.HexToHash("0x2222")),
+	}
+
+	hash1 := node.Hash()
+
+	// Hash should be deterministic
+	hash2 := node.Hash()
+	if hash1 != hash2 {
+		t.Errorf("Hash not deterministic: %x != %x", hash1, hash2)
+	}
+
+	// Changing a child should change the hash
+	node.left = HashedNode(common.HexToHash("0x3333"))
+	hash3 := node.Hash()
+	if hash1 == hash3 {
+		t.Error("Hash didn't change after modifying left child")
+	}
+
+	// Test with nil children (should use zero hash)
+	nodeWithNil := &InternalNode{
+		depth: 0,
+		left:  nil,
+		right: HashedNode(common.HexToHash("0x4444")),
+	}
+	hashWithNil := nodeWithNil.Hash()
+	if hashWithNil == (common.Hash{}) {
+		t.Error("Hash shouldn't be zero even with nil child")
+	}
+}
+
+// TestInternalNodeGetValuesAtStem tests GetValuesAtStem method
+func TestInternalNodeGetValuesAtStem(t *testing.T) {
+	// Create a tree with values at different stems; the stems differ in
+	// their first bit so they land in opposite children.
+	leftStem := make([]byte, 31)
+	rightStem := make([]byte, 31)
+	rightStem[0] = 0x80
+
+	var leftValues, rightValues [256][]byte
+	leftValues[0] = common.HexToHash("0x0101").Bytes()
+	leftValues[10] = common.HexToHash("0x0102").Bytes()
+	rightValues[0] = common.HexToHash("0x0201").Bytes()
+	rightValues[20] = common.HexToHash("0x0202").Bytes()
+
+	node := &InternalNode{
+		depth: 0,
+		left: &StemNode{
+			Stem:   leftStem,
+			Values: leftValues[:],
+			depth:  1,
+		},
+		right: &StemNode{
+			Stem:   rightStem,
+			Values: rightValues[:],
+			depth:  1,
+		},
+	}
+
+	// Get values from left stem
+	values, err := node.GetValuesAtStem(leftStem, nil)
+	if err != nil {
+		t.Fatalf("Failed to get left values: %v", err)
+	}
+	if !bytes.Equal(values[0], leftValues[0]) {
+		t.Error("Left value at index 0 mismatch")
+	}
+	if !bytes.Equal(values[10], leftValues[10]) {
+		t.Error("Left value at index 10 mismatch")
+	}
+
+	// Get values from right stem
+	values, err = node.GetValuesAtStem(rightStem, nil)
+	if err != nil {
+		t.Fatalf("Failed to get right values: %v", err)
+	}
+	if !bytes.Equal(values[0], rightValues[0]) {
+		t.Error("Right value at index 0 mismatch")
+	}
+	if !bytes.Equal(values[20], rightValues[20]) {
+		t.Error("Right value at index 20 mismatch")
+	}
+}
+
+// TestInternalNodeInsertValuesAtStem tests InsertValuesAtStem method
+func TestInternalNodeInsertValuesAtStem(t *testing.T) {
+	// Start with an internal node with empty children
+	node := &InternalNode{
+		depth: 0,
+		left:  Empty{},
+		right: Empty{},
+	}
+
+	// Insert a whole value group at a stem in the left subtree
+	// (the all-zero stem has a 0 first bit).
+	stem := make([]byte, 31)
+	var values [256][]byte
+	values[5] = common.HexToHash("0x0505").Bytes()
+	values[10] = common.HexToHash("0x1010").Bytes()
+
+	newNode, err := node.InsertValuesAtStem(stem, values[:], nil, 0)
+	if err != nil {
+		t.Fatalf("Failed to insert values: %v", err)
+	}
+
+	internalNode, ok := newNode.(*InternalNode)
+	if !ok {
+		t.Fatalf("Expected InternalNode, got %T", newNode)
+	}
+
+	// Check that left child is now a StemNode with the values
+	leftStem, ok := internalNode.left.(*StemNode)
+	if !ok {
+		t.Fatalf("Expected left child to be StemNode, got %T", internalNode.left)
+	}
+
+	if !bytes.Equal(leftStem.Values[5], values[5]) {
+		t.Error("Value at index 5 mismatch")
+	}
+	if !bytes.Equal(leftStem.Values[10], values[10]) {
+		t.Error("Value at index 10 mismatch")
+	}
+}
+
+// TestInternalNodeCollectNodes tests CollectNodes method
+func TestInternalNodeCollectNodes(t *testing.T) {
+	// Create an internal node with two stem children
+	leftStem := &StemNode{
+		Stem:   make([]byte, 31),
+		Values: make([][]byte, 256),
+		depth:  1,
+	}
+
+	rightStem := &StemNode{
+		Stem:   make([]byte, 31),
+		Values: make([][]byte, 256),
+		depth:  1,
+	}
+	rightStem.Stem[0] = 0x80
+
+	node := &InternalNode{
+		depth: 0,
+		left:  leftStem,
+		right: rightStem,
+	}
+
+	var collectedPaths [][]byte
+	var collectedNodes []BinaryNode
+
+	// The path is cloned before being recorded because CollectNodes may
+	// reuse the backing array between invocations.
+	flushFn := func(path []byte, n BinaryNode) {
+		pathCopy := make([]byte, len(path))
+		copy(pathCopy, path)
+		collectedPaths = append(collectedPaths, pathCopy)
+		collectedNodes = append(collectedNodes, n)
+	}
+
+	err := node.CollectNodes([]byte{1}, flushFn)
+	if err != nil {
+		t.Fatalf("Failed to collect nodes: %v", err)
+	}
+
+	// Should have collected 3 nodes: left stem, right stem, and the internal node itself
+	if len(collectedNodes) != 3 {
+		t.Errorf("Expected 3 collected nodes, got %d", len(collectedNodes))
+	}
+
+	// Check paths: children are flushed before their parent (post-order).
+	expectedPaths := [][]byte{
+		{1, 0}, // left child
+		{1, 1}, // right child
+		{1},    // internal node itself
+	}
+
+	for i, expectedPath := range expectedPaths {
+		if !bytes.Equal(collectedPaths[i], expectedPath) {
+			t.Errorf("Path %d mismatch: expected %v, got %v", i, expectedPath, collectedPaths[i])
+		}
+	}
+}
+
+// TestInternalNodeGetHeight tests GetHeight method
+func TestInternalNodeGetHeight(t *testing.T) {
+	// Create a tree with different heights
+	// Left subtree: depth 2 (internal -> stem)
+	// Right subtree: depth 1 (stem)
+	leftInternal := &InternalNode{
+		depth: 1,
+		left: &StemNode{
+			Stem:   make([]byte, 31),
+			Values: make([][]byte, 256),
+			depth:  2,
+		},
+		right: Empty{},
+	}
+
+	rightStem := &StemNode{
+		Stem:   make([]byte, 31),
+		Values: make([][]byte, 256),
+		depth:  1,
+	}
+
+	node := &InternalNode{
+		depth: 0,
+		left:  leftInternal,
+		right: rightStem,
+	}
+
+	height := node.GetHeight()
+	// Height should be max(left height, right height) + 1
+	// Left height: 2, Right height: 1, so total: 3
+	if height != 3 {
+		t.Errorf("Expected height 3, got %d", height)
+	}
+}
+
+// TestInternalNodeDepthTooLarge tests handling of excessive depth
+func TestInternalNodeDepthTooLarge(t *testing.T) {
+	// Create an internal node one past the maximum depth (31 bytes of
+	// stem = 248 bits of path).
+	node := &InternalNode{
+		depth: 31*8 + 1,
+		left:  Empty{},
+		right: Empty{},
+	}
+
+	stem := make([]byte, 31)
+	_, err := node.GetValuesAtStem(stem, nil)
+	if err == nil {
+		t.Fatal("Expected error for excessive depth")
+	}
+	// NOTE(review): matching on the error string is brittle; prefer
+	// errors.Is against an exported sentinel if one exists.
+	if err.Error() != "node too deep" {
+		t.Errorf("Expected 'node too deep' error, got: %v", err)
+	}
+}
diff --git a/trie/bintrie/iterator.go b/trie/bintrie/iterator.go
new file mode 100644
index 0000000000..a6bab2bcfa
--- /dev/null
+++ b/trie/bintrie/iterator.go
@@ -0,0 +1,261 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package bintrie
+
+import (
+ "errors"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+// errIteratorEnd is an internal sentinel marking normal exhaustion of the
+// iterator; it is never surfaced through Error().
+var errIteratorEnd = errors.New("end of iteration")
+
+// binaryNodeIteratorState records the traversal progress within a single
+// node on the iterator's stack.
+type binaryNodeIteratorState struct {
+	Node  BinaryNode // node being traversed
+	Index int        // internal node: 0 = nothing visited, 1 = left visited, 2 = right visited; stem node: next value slot to probe
+}
+
+// binaryNodeIterator walks a binary trie depth-first, left child before
+// right, implementing the trie.NodeIterator interface.
+type binaryNodeIterator struct {
+	trie    *BinaryTrie // trie being iterated
+	current BinaryNode  // node the iterator is positioned on
+	lastErr error       // sticky error/end-of-iteration marker
+
+	stack []binaryNodeIteratorState // ancestors of current, current on top
+}
+
+// newBinaryNodeIterator creates a node iterator over the given trie.
+// NOTE(review): the start parameter is currently ignored — iteration
+// always begins at the root; seeking is not implemented yet.
+func newBinaryNodeIterator(t *BinaryTrie, _ []byte) (trie.NodeIterator, error) {
+	if t.Hash() == zero {
+		// An empty trie has nothing to yield: start in the terminated state.
+		return &binaryNodeIterator{trie: t, lastErr: errIteratorEnd}, nil
+	}
+	it := &binaryNodeIterator{trie: t, current: t.root}
+	return it, nil
+}
+
+// Next moves the iterator to the next node. If the parameter is false, any child
+// nodes will be skipped.
+// NOTE(review): the descend parameter is currently only forwarded to the
+// recursive calls and never used to skip children — confirm this is intended.
+func (it *binaryNodeIterator) Next(descend bool) bool {
+	// Once the iterator has terminated, it stays terminated; the previous
+	// redundant re-assignment of the sentinel has been dropped.
+	if it.lastErr == errIteratorEnd {
+		return false
+	}
+
+	if len(it.stack) == 0 {
+		// First call: position the iterator on the root.
+		it.stack = append(it.stack, binaryNodeIteratorState{Node: it.trie.root})
+		it.current = it.trie.root
+
+		return true
+	}
+
+	switch node := it.current.(type) {
+	case *InternalNode:
+		// index: 0 = nothing visited, 1=left visited, 2=right visited
+		context := &it.stack[len(it.stack)-1]
+
+		// recurse into both children, left first
+		if context.Index == 0 {
+			if _, isempty := node.left.(Empty); node.left != nil && !isempty {
+				it.stack = append(it.stack, binaryNodeIteratorState{Node: node.left})
+				it.current = node.left
+				return it.Next(descend)
+			}
+
+			context.Index++
+		}
+
+		if context.Index == 1 {
+			if _, isempty := node.right.(Empty); node.right != nil && !isempty {
+				it.stack = append(it.stack, binaryNodeIteratorState{Node: node.right})
+				it.current = node.right
+				return it.Next(descend)
+			}
+
+			context.Index++
+		}
+
+		// Reached the end of this node, go back to the parent, if
+		// this isn't root.
+		if len(it.stack) == 1 {
+			it.lastErr = errIteratorEnd
+			return false
+		}
+		it.stack = it.stack[:len(it.stack)-1]
+		it.current = it.stack[len(it.stack)-1].Node
+		it.stack[len(it.stack)-1].Index++
+		return it.Next(descend)
+	case *StemNode:
+		// Look for the next non-empty value; Index holds the next slot
+		// to probe so repeated calls resume where the last one stopped.
+		for i := it.stack[len(it.stack)-1].Index; i < 256; i++ {
+			if node.Values[i] != nil {
+				it.stack[len(it.stack)-1].Index = i + 1
+				return true
+			}
+		}
+
+		// go back to parent to get the next leaf
+		it.stack = it.stack[:len(it.stack)-1]
+		it.current = it.stack[len(it.stack)-1].Node
+		it.stack[len(it.stack)-1].Index++
+		return it.Next(descend)
+	case HashedNode:
+		// resolve the node from the backing store
+		data, err := it.trie.nodeResolver(it.Path(), common.Hash(node))
+		if err != nil {
+			panic(err)
+		}
+		it.current, err = DeserializeNode(data, len(it.stack)-1)
+		if err != nil {
+			panic(err)
+		}
+
+		// update the stack and parent with the resolved node so it is
+		// not resolved again on the way back up
+		it.stack[len(it.stack)-1].Node = it.current
+		parent := &it.stack[len(it.stack)-2]
+		if parent.Index == 0 {
+			parent.Node.(*InternalNode).left = it.current
+		} else {
+			parent.Node.(*InternalNode).right = it.current
+		}
+		return it.Next(descend)
+	case Empty:
+		// Unreachable in practice: Empty children are never pushed onto
+		// the stack. NOTE(review): lastErr is not set here — confirm
+		// callers treat a false return as terminal.
+		return false
+	default:
+		panic("invalid node type")
+	}
+}
+
+// Error returns the error status of the iterator. The end-of-iteration
+// sentinel is an internal marker and is reported as a nil error.
+func (it *binaryNodeIterator) Error() error {
+	if err := it.lastErr; err != errIteratorEnd {
+		return err
+	}
+	return nil
+}
+
+// Hash returns the hash of the current node.
+// NOTE(review): current is nil for an iterator built over an empty trie
+// before Next is called — confirm callers never invoke Hash in that state.
+func (it *binaryNodeIterator) Hash() common.Hash {
+	return it.current.Hash()
+}
+
+// Parent returns the hash of the parent of the current node. The hash may be the one
+// grandparent if the immediate parent is an internal node with no hash.
+// NOTE(review): the top of the stack holds the current node's own state,
+// so this returns the current node's hash, not its parent's — confirm
+// whether it.stack[len(it.stack)-2] was intended.
+func (it *binaryNodeIterator) Parent() common.Hash {
+	return it.stack[len(it.stack)-1].Node.Hash()
+}
+
+// Path returns the hex-encoded path to the current node.
+// Callers must not retain references to the return value after calling Next.
+// For leaf nodes, the last element of the path is the 'terminator symbol' 0x10.
+func (it *binaryNodeIterator) Path() []byte {
+	if it.Leaf() {
+		return it.LeafKey()
+	}
+	var path []byte
+	for i, state := range it.stack {
+		// skip the last byte: the top of the stack is the current node
+		// itself, whose Index does not describe a taken branch.
+		if i >= len(it.stack)-1 {
+			break
+		}
+		// While the iterator is inside the left subtree the ancestor's
+		// Index is 0, inside the right subtree it is 1, so the Index of
+		// each ancestor spells out the descent path bit by bit.
+		path = append(path, byte(state.Index))
+	}
+	return path
+}
+
+// NodeBlob returns the serialized bytes of the current node. The node is
+// re-serialized on every call.
+func (it *binaryNodeIterator) NodeBlob() []byte {
+	return SerializeNode(it.current)
+}
+
+// Leaf returns true iff the current node is a leaf node.
+func (it *binaryNodeIterator) Leaf() bool {
+ _, ok := it.current.(*StemNode)
+ return ok
+}
+
+// LeafKey returns the key of the leaf. The method panics if the iterator is not
+// positioned at a leaf. Callers must not retain references to the value after
+// calling Next.
+func (it *binaryNodeIterator) LeafKey() []byte {
+	leaf, ok := it.current.(*StemNode)
+	if !ok {
+		// The panic message previously referred to Leaf(); name the
+		// actual method so the failure is attributable.
+		panic("LeafKey() called on a binary node iterator not at a leaf location")
+	}
+	// Index was advanced past the value that was just yielded, hence -1.
+	return leaf.Key(it.stack[len(it.stack)-1].Index - 1)
+}
+
+// LeafBlob returns the content of the leaf. The method panics if the iterator
+// is not positioned at a leaf. Callers must not retain references to the value
+// after calling Next.
+func (it *binaryNodeIterator) LeafBlob() []byte {
+	leaf, ok := it.current.(*StemNode)
+	if !ok {
+		panic("LeafBlob() called on a binary node iterator not at a leaf location")
+	}
+	// Index was advanced past the value that was just yielded, hence -1.
+	return leaf.Values[it.stack[len(it.stack)-1].Index-1]
+}
+
+// LeafProof returns the Merkle proof of the leaf. The method panics if the
+// iterator is not positioned at a leaf. Callers must not retain references
+// to the value after calling Next.
+func (it *binaryNodeIterator) LeafProof() [][]byte {
+	sn, ok := it.current.(*StemNode)
+	if !ok {
+		panic("LeafProof() called on an binary node iterator not at a leaf location")
+	}
+
+	proof := make([][]byte, 0, len(it.stack)+NodeWidth)
+
+	// Build proof by walking up the stack and collecting sibling hashes.
+	// NOTE(review): the slice excludes the last TWO stack entries — the
+	// stem node itself plus its immediate parent — so the deepest
+	// internal node contributes no sibling; confirm [:len(it.stack)-1]
+	// was not intended.
+	for i := range it.stack[:len(it.stack)-2] {
+		state := it.stack[i]
+		internalNode := state.Node.(*InternalNode) // should panic if the node isn't an InternalNode
+
+		// Add the sibling hash to the proof. Index is 0 while the
+		// iterator is inside the left subtree and 1 inside the right.
+		if state.Index == 0 {
+			// We came from left, so include right sibling
+			proof = append(proof, internalNode.right.Hash().Bytes())
+		} else {
+			// We came from right, so include left sibling
+			proof = append(proof, internalNode.left.Hash().Bytes())
+		}
+	}
+
+	// Add the stem and siblings
+	proof = append(proof, sn.Stem)
+	for _, v := range sn.Values {
+		proof = append(proof, v)
+	}
+
+	return proof
+}
+
+// AddResolver sets an intermediate database to use for looking up trie nodes
+// before reaching into the real persistent layer.
+//
+// This is not required for normal operation, rather is an optimization for
+// cases where trie nodes can be recovered from some external mechanism without
+// reading from disk. In those cases, this resolver allows short circuiting
+// accesses and returning them from memory.
+//
+// Before adding a similar mechanism to any other place in Geth, consider
+// making trie.Database an interface and wrapping at that level. It's a huge
+// refactor, but it could be worth it if another occurrence arises.
+func (it *binaryNodeIterator) AddResolver(trie.NodeResolver) {
+	// Not implemented, but should not panic: node resolution currently
+	// goes through the trie's own nodeResolver instead.
+}
diff --git a/trie/bintrie/iterator_test.go b/trie/bintrie/iterator_test.go
new file mode 100644
index 0000000000..8773e9e0c5
--- /dev/null
+++ b/trie/bintrie/iterator_test.go
@@ -0,0 +1,83 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package bintrie
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
+ "github.com/holiman/uint256"
+)
+
+// newTestDatabase assembles a trie database on top of the given disk
+// database, configured for either the hash or the path scheme.
+func newTestDatabase(diskdb ethdb.Database, scheme string) *triedb.Database {
+	config := &triedb.Config{Preimages: true}
+	switch scheme {
+	case rawdb.HashScheme:
+		config.HashDB = &hashdb.Config{CleanCacheSize: 0}
+	default:
+		config.PathDB = &pathdb.Config{TrieCleanSize: 0, StateCleanSize: 0}
+	}
+	return triedb.NewDatabase(diskdb, config)
+}
+
+// TestBinaryIterator inserts two accounts whose keys land under distinct
+// stems and checks the node iterator visits exactly two leaves.
+func TestBinaryIterator(t *testing.T) {
+	trie, err := NewBinaryTrie(types.EmptyVerkleHash, newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme))
+	if err != nil {
+		t.Fatal(err)
+	}
+	account0 := &types.StateAccount{
+		Nonce:    1,
+		Balance:  uint256.NewInt(2),
+		Root:     types.EmptyRootHash,
+		CodeHash: nil,
+	}
+	// NOTE: the code size isn't written to the trie via TryUpdateAccount
+	// so it will be missing from the test nodes.
+	trie.UpdateAccount(common.Address{}, account0, 0)
+	account1 := &types.StateAccount{
+		Nonce:    1337,
+		Balance:  uint256.NewInt(2000),
+		Root:     types.EmptyRootHash,
+		CodeHash: nil,
+	}
+	// This address is meant to hash to a value that has the same first byte as 0xbf
+	var clash = common.HexToAddress("69fd8034cdb20934dedffa7dccb4fb3b8062a8be")
+	trie.UpdateAccount(clash, account1, 0)
+
+	// Manually go over every node to check that we get all
+	// the correct nodes.
+	it, err := trie.NodeIterator(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var leafcount int
+	for it.Next(true) {
+		t.Logf("Node: %x", it.Path())
+		if it.Leaf() {
+			leafcount++
+			t.Logf("\tLeaf: %x", it.LeafKey())
+		}
+	}
+	// The failure message previously claimed the expected count was 6
+	// while the assertion checks for 2; the message now matches.
+	if leafcount != 2 {
+		t.Fatalf("invalid leaf count: %d != 2", leafcount)
+	}
+}
diff --git a/trie/bintrie/key_encoding.go b/trie/bintrie/key_encoding.go
new file mode 100644
index 0000000000..13c2057371
--- /dev/null
+++ b/trie/bintrie/key_encoding.go
@@ -0,0 +1,79 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package bintrie
+
+import (
+ "bytes"
+ "crypto/sha256"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/holiman/uint256"
+)
+
+const (
+	BasicDataLeafKey = 0 // sub-index of the basic-data leaf within an account's group
+	CodeHashLeafKey  = 1 // sub-index of the code-hash leaf within an account's group
+	// Byte offsets of the packed account fields inside the basic-data leaf.
+	BasicDataCodeSizeOffset = 5
+	BasicDataNonceOffset    = 8
+	BasicDataBalanceOffset  = 16
+)
+
+var (
+	zeroHash   = common.Hash{}       // all-zero word, reused as padding/comparison source
+	codeOffset = uint256.NewInt(128) // additive offset applied to code-chunk numbers
+)
+
+// GetBinaryTreeKey derives the 32-byte tree key for the given address and
+// tree-index key: sha256(0^12 ++ address ++ key[:31]), with the last byte
+// of the input key preserved as the sub-index.
+func GetBinaryTreeKey(addr common.Address, key []byte) []byte {
+	h := sha256.New()
+	h.Write(zeroHash[:12]) // pad the 20-byte address to 32 bytes
+	h.Write(addr[:])
+	h.Write(key[:31])
+	out := h.Sum(nil)
+	out[31] = key[31]
+	return out
+}
+
+func GetBinaryTreeKeyCodeHash(addr common.Address) []byte {
+ var k [32]byte
+ k[31] = CodeHashLeafKey
+ return GetBinaryTreeKey(addr, k[:])
+}
+
+// GetBinaryTreeKeyStorageSlot maps a 32-byte storage slot of the given
+// account to its binary tree key.
+func GetBinaryTreeKeyStorageSlot(address common.Address, key []byte) []byte {
+	var k [32]byte
+
+	// Case when the key belongs to the account header: slots below 64
+	// with an all-zero upper part share the account's own group.
+	if key[31] < 64 && bytes.Equal(key[:31], zeroHash[:31]) {
+		k[31] = 64 + key[31]
+		return GetBinaryTreeKey(address, k[:])
+	}
+
+	// Set the main storage offset
+	// note that the first 64 bytes of the main offset storage
+	// are unreachable, which is consistent with the spec and
+	// what verkle does.
+	k[0] = 1 // 1 << 248
+	copy(k[1:], key[:31])
+	k[31] = key[31]
+	return GetBinaryTreeKey(address, k[:])
+}
+
+// GetBinaryTreeKeyCodeChunk returns the tree key of the given code chunk
+// of the given account address.
+func GetBinaryTreeKeyCodeChunk(address common.Address, chunknr *uint256.Int) []byte {
+	// Bytes32 yields the full 32-byte big-endian encoding. The previous
+	// Bytes() call returned the minimal encoding (1-2 bytes for small
+	// chunk numbers), which made GetBinaryTreeKey's key[:31] slice panic
+	// with an out-of-range index.
+	chunkOffset := new(uint256.Int).Add(codeOffset, chunknr).Bytes32()
+	return GetBinaryTreeKey(address, chunkOffset[:])
+}
diff --git a/trie/bintrie/stem_node.go b/trie/bintrie/stem_node.go
new file mode 100644
index 0000000000..50c06c9761
--- /dev/null
+++ b/trie/bintrie/stem_node.go
@@ -0,0 +1,213 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package bintrie
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "slices"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// StemNode represents a group of NodeWidth values sharing the same 31-byte
+// stem, i.e. all keys identical except for their last byte.
+type StemNode struct {
+	Stem   []byte   // Stem path to get to 256 values
+	Values [][]byte // All values, indexed by the last byte of the key.
+	depth  int      // Depth of the node
+}
+
+// Get retrieves the value for the given key. Direct calls are unsupported:
+// lookups are expected to reach stem nodes through GetValuesAtStem instead.
+func (bt *StemNode) Get(key []byte, _ NodeResolverFn) ([]byte, error) {
+	panic("this should not be called directly")
+}
+
+// Insert inserts a new key-value pair into the node. If the key shares this
+// node's stem, the value is stored in the matching slot; otherwise the stem
+// node is pushed one level down under a new internal node, recursing for as
+// long as the two stems keep agreeing bit by bit.
+func (bt *StemNode) Insert(key []byte, value []byte, _ NodeResolverFn, depth int) (BinaryNode, error) {
+	// Validate up front so both the same-stem and the split path enforce
+	// the 32-byte value invariant (previously only the same-stem path
+	// performed this check).
+	if len(value) != 32 {
+		return bt, errors.New("invalid insertion: value length")
+	}
+	if bytes.Equal(bt.Stem, key[:31]) {
+		bt.Values[key[31]] = value
+		return bt, nil
+	}
+	// The stems differ: interpose an internal node at the current depth
+	// and hang this stem node off the side its stem bit selects.
+	bitStem := bt.Stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1
+
+	n := &InternalNode{depth: bt.depth}
+	bt.depth++
+	var child, other *BinaryNode
+	if bitStem == 0 {
+		n.left = bt
+		child = &n.left
+		other = &n.right
+	} else {
+		n.right = bt
+		child = &n.right
+		other = &n.left
+	}
+
+	bitKey := key[n.depth/8] >> (7 - (n.depth % 8)) & 1
+	if bitKey == bitStem {
+		// Both stems still agree at this level: recurse to split deeper.
+		var err error
+		*child, err = (*child).Insert(key, value, nil, depth+1)
+		if err != nil {
+			return n, fmt.Errorf("insert error: %w", err)
+		}
+		*other = Empty{}
+	} else {
+		// The stems diverge here: the new key gets its own stem node on
+		// the opposite side.
+		var values [256][]byte
+		values[key[31]] = value
+		*other = &StemNode{
+			Stem:   slices.Clone(key[:31]),
+			Values: values[:],
+			depth:  depth + 1,
+		}
+	}
+	return n, nil
+}
+
+// Copy creates a deep copy of the node: both the stem and every value
+// slice get their own backing storage.
+func (bt *StemNode) Copy() BinaryNode {
+	var values [256][]byte
+	for i := range bt.Values {
+		values[i] = slices.Clone(bt.Values[i])
+	}
+	return &StemNode{
+		Stem:   slices.Clone(bt.Stem),
+		Values: values[:],
+		depth:  bt.depth,
+	}
+}
+
+// GetHeight returns the height of the node; a stem node is always a
+// leaf, so its height is one.
+func (bt *StemNode) GetHeight() int { return 1 }
+
+// Hash returns the hash of the node: the 256 values are merkelized into a
+// binary subtree (pairs of empty hashes collapse back to the zero hash),
+// and the result is combined as sha256(stem ++ 0x00 ++ subtreeRoot).
+func (bt *StemNode) Hash() common.Hash {
+	var data [NodeWidth]common.Hash
+	// Leaf level: hash each present value; absent slots keep the zero hash.
+	for i, v := range bt.Values {
+		if v != nil {
+			h := sha256.Sum256(v)
+			data[i] = common.BytesToHash(h[:])
+		}
+	}
+
+	h := sha256.New()
+	// Fold the 256 leaf hashes pairwise over 8 levels, in place.
+	for level := 1; level <= 8; level++ {
+		for i := range NodeWidth / (1 << level) {
+			h.Reset()
+
+			// Two empty subtrees combine to the empty (zero) hash
+			// instead of being hashed, keeping sparse groups cheap.
+			if data[i*2] == (common.Hash{}) && data[i*2+1] == (common.Hash{}) {
+				data[i] = common.Hash{}
+				continue
+			}
+
+			h.Write(data[i*2][:])
+			h.Write(data[i*2+1][:])
+			data[i] = common.Hash(h.Sum(nil))
+		}
+	}
+
+	h.Reset()
+	h.Write(bt.Stem)
+	h.Write([]byte{0}) // marker byte between the stem and the subtree root
+	h.Write(data[0][:])
+	return common.BytesToHash(h.Sum(nil))
+}
+
+// CollectNodes collects all child nodes at a given path, and flushes it
+// into the provided node collector. A stem node is a leaf, so only the
+// node itself is reported.
+func (bt *StemNode) CollectNodes(path []byte, flush NodeFlushFn) error {
+	flush(path, bt)
+	return nil
+}
+
+// GetValuesAtStem retrieves the group of values located at the given stem
+// key. The node's own value slice is returned directly, without copying.
+func (bt *StemNode) GetValuesAtStem(_ []byte, _ NodeResolverFn) ([][]byte, error) {
+	return bt.Values, nil
+}
+
+// InsertValuesAtStem inserts a full value group at the given stem in the internal node.
+// Already-existing values will be overwritten. If the stems differ, this node
+// is pushed one level down under a new internal node, mirroring Insert.
+func (bt *StemNode) InsertValuesAtStem(key []byte, values [][]byte, _ NodeResolverFn, depth int) (BinaryNode, error) {
+	if !bytes.Equal(bt.Stem, key[:31]) {
+		// Bit of this node's stem at the current depth decides which side
+		// of the new internal node it moves to.
+		bitStem := bt.Stem[bt.depth/8] >> (7 - (bt.depth % 8)) & 1
+
+		n := &InternalNode{depth: bt.depth}
+		bt.depth++
+		var child, other *BinaryNode
+		if bitStem == 0 {
+			n.left = bt
+			child = &n.left
+			other = &n.right
+		} else {
+			n.right = bt
+			child = &n.right
+			other = &n.left
+		}
+
+		bitKey := key[n.depth/8] >> (7 - (n.depth % 8)) & 1
+		if bitKey == bitStem {
+			// Stems still agree at this level: recurse to split deeper.
+			var err error
+			*child, err = (*child).InsertValuesAtStem(key, values, nil, depth+1)
+			if err != nil {
+				return n, fmt.Errorf("insert error: %w", err)
+			}
+			*other = Empty{}
+		} else {
+			// NOTE(review): Insert uses depth+1 for the new sibling while
+			// this uses n.depth+1 — they agree only when the depth
+			// argument matches bt.depth; confirm intentional.
+			*other = &StemNode{
+				Stem:   slices.Clone(key[:31]),
+				Values: values,
+				depth:  n.depth + 1,
+			}
+		}
+		return n, nil
+	}
+
+	// same stem, just merge the two value lists
+	for i, v := range values {
+		if v != nil {
+			bt.Values[i] = v
+		}
+	}
+	return bt, nil
+}
+
+// toDot renders this stem node and its non-empty values as Graphviz dot
+// statements, linking it to the given parent node name.
+func (bt *StemNode) toDot(parent, path string) string {
+	me := "stem" + path
+	ret := fmt.Sprintf("%s [label=\"stem=%x c=%x\"]\n", me, bt.Stem, bt.Hash())
+	ret += " " + parent + " -> " + me + "\n"
+	for i, v := range bt.Values {
+		if v == nil {
+			continue
+		}
+		ret += fmt.Sprintf("%s%x [label=\"%x\"]\n", me, i, v)
+		ret += fmt.Sprintf("%s -> %s%x\n", me, me, i)
+	}
+	return ret
+}
+
+// Key returns the full key for the given index.
+func (bt *StemNode) Key(i int) []byte {
+ var ret [32]byte
+ copy(ret[:], bt.Stem)
+ ret[StemSize] = byte(i)
+ return ret[:]
+}
diff --git a/trie/bintrie/stem_node_test.go b/trie/bintrie/stem_node_test.go
new file mode 100644
index 0000000000..e0ffd5c3c8
--- /dev/null
+++ b/trie/bintrie/stem_node_test.go
@@ -0,0 +1,373 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bintrie
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// TestStemNodeInsertSameStem tests inserting values with the same stem:
+// the node must remain a StemNode and retain both the old and new value.
+func TestStemNodeInsertSameStem(t *testing.T) {
+	stem := make([]byte, 31)
+	for i := range stem {
+		stem[i] = byte(i)
+	}
+
+	var values [256][]byte
+	values[0] = common.HexToHash("0x0101").Bytes()
+
+	node := &StemNode{
+		Stem:   stem,
+		Values: values[:],
+		depth:  0,
+	}
+
+	// Insert another value with the same stem but different last byte
+	key := make([]byte, 32)
+	copy(key[:31], stem)
+	key[31] = 10
+	value := common.HexToHash("0x0202").Bytes()
+
+	newNode, err := node.Insert(key, value, nil, 0)
+	if err != nil {
+		t.Fatalf("Failed to insert: %v", err)
+	}
+
+	// Should still be a StemNode (no split occurs for a shared stem)
+	stemNode, ok := newNode.(*StemNode)
+	if !ok {
+		t.Fatalf("Expected StemNode, got %T", newNode)
+	}
+
+	// Check that both values are present
+	if !bytes.Equal(stemNode.Values[0], values[0]) {
+		t.Errorf("Value at index 0 mismatch")
+	}
+	if !bytes.Equal(stemNode.Values[10], value) {
+		t.Errorf("Value at index 10 mismatch")
+	}
+}
+
+// TestStemNodeInsertDifferentStem tests inserting values with different stems:
+// a key diverging at the first bit must split the leaf into an internal node
+// with the two stems as left (bit 0) and right (bit 1) children.
+func TestStemNodeInsertDifferentStem(t *testing.T) {
+	stem1 := make([]byte, 31)
+	for i := range stem1 {
+		stem1[i] = 0x00
+	}
+
+	var values [256][]byte
+	values[0] = common.HexToHash("0x0101").Bytes()
+
+	node := &StemNode{
+		Stem:   stem1,
+		Values: values[:],
+		depth:  0,
+	}
+
+	// Insert with a different stem (first bit different)
+	key := make([]byte, 32)
+	key[0] = 0x80 // First bit is 1 instead of 0
+	value := common.HexToHash("0x0202").Bytes()
+
+	newNode, err := node.Insert(key, value, nil, 0)
+	if err != nil {
+		t.Fatalf("Failed to insert: %v", err)
+	}
+
+	// Should now be an InternalNode
+	internalNode, ok := newNode.(*InternalNode)
+	if !ok {
+		t.Fatalf("Expected InternalNode, got %T", newNode)
+	}
+
+	// Check depth: the split happens at the root level
+	if internalNode.depth != 0 {
+		t.Errorf("Expected depth 0, got %d", internalNode.depth)
+	}
+
+	// Original stem should be on the left (bit 0)
+	leftStem, ok := internalNode.left.(*StemNode)
+	if !ok {
+		t.Fatalf("Expected left child to be StemNode, got %T", internalNode.left)
+	}
+	if !bytes.Equal(leftStem.Stem, stem1) {
+		t.Errorf("Left stem mismatch")
+	}
+
+	// New stem should be on the right (bit 1)
+	rightStem, ok := internalNode.right.(*StemNode)
+	if !ok {
+		t.Fatalf("Expected right child to be StemNode, got %T", internalNode.right)
+	}
+	if !bytes.Equal(rightStem.Stem, key[:31]) {
+		t.Errorf("Right stem mismatch")
+	}
+}
+
+// TestStemNodeInsertInvalidValueLength tests inserting a value with an invalid
+// length: leaf values must be exactly 32 bytes, anything else is rejected.
+func TestStemNodeInsertInvalidValueLength(t *testing.T) {
+	stem := make([]byte, 31)
+	var values [256][]byte
+
+	node := &StemNode{
+		Stem:   stem,
+		Values: values[:],
+		depth:  0,
+	}
+
+	// Try to insert value with wrong length
+	key := make([]byte, 32)
+	copy(key[:31], stem)
+	invalidValue := []byte{1, 2, 3} // Not 32 bytes
+
+	_, err := node.Insert(key, invalidValue, nil, 0)
+	if err == nil {
+		t.Fatal("Expected error for invalid value length")
+	}
+
+	// The exact error message is part of the contract being checked here.
+	if err.Error() != "invalid insertion: value length" {
+		t.Errorf("Expected 'invalid insertion: value length' error, got: %v", err)
+	}
+}
+
+// TestStemNodeCopy tests the Copy method: the copy must be deep, i.e. equal
+// in content but sharing no backing storage with the original.
+func TestStemNodeCopy(t *testing.T) {
+	stem := make([]byte, 31)
+	for i := range stem {
+		stem[i] = byte(i)
+	}
+
+	var values [256][]byte
+	values[0] = common.HexToHash("0x0101").Bytes()
+	values[255] = common.HexToHash("0x0202").Bytes()
+
+	node := &StemNode{
+		Stem:   stem,
+		Values: values[:],
+		depth:  10,
+	}
+
+	// Create a copy
+	copied := node.Copy()
+	copiedStem, ok := copied.(*StemNode)
+	if !ok {
+		t.Fatalf("Expected StemNode, got %T", copied)
+	}
+
+	// Check that values are equal but not the same slice
+	if !bytes.Equal(copiedStem.Stem, node.Stem) {
+		t.Errorf("Stem mismatch after copy")
+	}
+	// Comparing the first element's address detects aliased backing arrays.
+	if &copiedStem.Stem[0] == &node.Stem[0] {
+		t.Error("Stem slice not properly cloned")
+	}
+
+	// Check values
+	if !bytes.Equal(copiedStem.Values[0], node.Values[0]) {
+		t.Errorf("Value at index 0 mismatch after copy")
+	}
+	if !bytes.Equal(copiedStem.Values[255], node.Values[255]) {
+		t.Errorf("Value at index 255 mismatch after copy")
+	}
+
+	// Check that value slices are cloned
+	if copiedStem.Values[0] != nil && &copiedStem.Values[0][0] == &node.Values[0][0] {
+		t.Error("Value slice not properly cloned")
+	}
+
+	// Check depth
+	if copiedStem.depth != node.depth {
+		t.Errorf("Depth mismatch: expected %d, got %d", node.depth, copiedStem.depth)
+	}
+}
+
+// TestStemNodeHash tests the Hash method: the hash must be deterministic and
+// must change when the node's values change.
+func TestStemNodeHash(t *testing.T) {
+	stem := make([]byte, 31)
+	var values [256][]byte
+	values[0] = common.HexToHash("0x0101").Bytes()
+
+	node := &StemNode{
+		Stem:   stem,
+		Values: values[:],
+		depth:  0,
+	}
+
+	hash1 := node.Hash()
+
+	// Hash should be deterministic
+	hash2 := node.Hash()
+	if hash1 != hash2 {
+		t.Errorf("Hash not deterministic: %x != %x", hash1, hash2)
+	}
+
+	// Changing a value should change the hash
+	node.Values[1] = common.HexToHash("0x0202").Bytes()
+	hash3 := node.Hash()
+	if hash1 == hash3 {
+		t.Error("Hash didn't change after modifying values")
+	}
+}
+
+// TestStemNodeGetValuesAtStem tests GetValuesAtStem: a stem node returns its
+// full value group, and ignores the stem argument entirely.
+func TestStemNodeGetValuesAtStem(t *testing.T) {
+	stem := make([]byte, 31)
+	for i := range stem {
+		stem[i] = byte(i)
+	}
+
+	var values [256][]byte
+	values[0] = common.HexToHash("0x0101").Bytes()
+	values[10] = common.HexToHash("0x0202").Bytes()
+	values[255] = common.HexToHash("0x0303").Bytes()
+
+	node := &StemNode{
+		Stem:   stem,
+		Values: values[:],
+		depth:  0,
+	}
+
+	// GetValuesAtStem with matching stem
+	retrievedValues, err := node.GetValuesAtStem(stem, nil)
+	if err != nil {
+		t.Fatalf("Failed to get values: %v", err)
+	}
+
+	// Check that all values match
+	for i := 0; i < 256; i++ {
+		if !bytes.Equal(retrievedValues[i], values[i]) {
+			t.Errorf("Value mismatch at index %d", i)
+		}
+	}
+
+	// GetValuesAtStem with different stem also returns the same values
+	// (implementation ignores the stem parameter)
+	differentStem := make([]byte, 31)
+	differentStem[0] = 0xFF
+
+	retrievedValues2, err := node.GetValuesAtStem(differentStem, nil)
+	if err != nil {
+		t.Fatalf("Failed to get values with different stem: %v", err)
+	}
+
+	// Should still return the same values (stem is ignored)
+	for i := 0; i < 256; i++ {
+		if !bytes.Equal(retrievedValues2[i], values[i]) {
+			t.Errorf("Value mismatch at index %d with different stem", i)
+		}
+	}
+}
+
+// TestStemNodeInsertValuesAtStem tests InsertValuesAtStem: inserting a group
+// at the same stem merges it in place, and nil entries in the incoming group
+// must not clobber values that are already present.
+func TestStemNodeInsertValuesAtStem(t *testing.T) {
+	stem := make([]byte, 31)
+	var values [256][]byte
+	values[0] = common.HexToHash("0x0101").Bytes()
+
+	node := &StemNode{
+		Stem:   stem,
+		Values: values[:],
+		depth:  0,
+	}
+
+	// Insert new values at the same stem
+	var newValues [256][]byte
+	newValues[1] = common.HexToHash("0x0202").Bytes()
+	newValues[2] = common.HexToHash("0x0303").Bytes()
+
+	newNode, err := node.InsertValuesAtStem(stem, newValues[:], nil, 0)
+	if err != nil {
+		t.Fatalf("Failed to insert values: %v", err)
+	}
+
+	stemNode, ok := newNode.(*StemNode)
+	if !ok {
+		t.Fatalf("Expected StemNode, got %T", newNode)
+	}
+
+	// Check that all values are present
+	if !bytes.Equal(stemNode.Values[0], values[0]) {
+		t.Error("Original value at index 0 missing")
+	}
+	if !bytes.Equal(stemNode.Values[1], newValues[1]) {
+		t.Error("New value at index 1 missing")
+	}
+	if !bytes.Equal(stemNode.Values[2], newValues[2]) {
+		t.Error("New value at index 2 missing")
+	}
+}
+
+// TestStemNodeGetHeight tests GetHeight: a lone stem node is a tree of
+// height 1.
+func TestStemNodeGetHeight(t *testing.T) {
+	node := &StemNode{
+		Stem:   make([]byte, 31),
+		Values: make([][]byte, 256),
+		depth:  0,
+	}
+
+	height := node.GetHeight()
+	if height != 1 {
+		t.Errorf("Expected height 1, got %d", height)
+	}
+}
+
+// TestStemNodeCollectNodes tests CollectNodes: a stem node is a leaf, so it
+// flushes exactly itself at the path it was given.
+func TestStemNodeCollectNodes(t *testing.T) {
+	stem := make([]byte, 31)
+	var values [256][]byte
+	values[0] = common.HexToHash("0x0101").Bytes()
+
+	node := &StemNode{
+		Stem:   stem,
+		Values: values[:],
+		depth:  0,
+	}
+
+	var collectedPaths [][]byte
+	var collectedNodes []BinaryNode
+
+	flushFn := func(path []byte, n BinaryNode) {
+		// Make a copy of the path, as the callback argument may be reused.
+		pathCopy := make([]byte, len(path))
+		copy(pathCopy, path)
+		collectedPaths = append(collectedPaths, pathCopy)
+		collectedNodes = append(collectedNodes, n)
+	}
+
+	err := node.CollectNodes([]byte{0, 1, 0}, flushFn)
+	if err != nil {
+		t.Fatalf("Failed to collect nodes: %v", err)
+	}
+
+	// Should have collected one node (itself)
+	if len(collectedNodes) != 1 {
+		t.Errorf("Expected 1 collected node, got %d", len(collectedNodes))
+	}
+
+	// Check that the collected node is the same
+	if collectedNodes[0] != node {
+		t.Error("Collected node doesn't match original")
+	}
+
+	// Check the path
+	if !bytes.Equal(collectedPaths[0], []byte{0, 1, 0}) {
+		t.Errorf("Path mismatch: expected [0, 1, 0], got %v", collectedPaths[0])
+	}
+}
diff --git a/trie/bintrie/trie.go b/trie/bintrie/trie.go
new file mode 100644
index 0000000000..0a8bd325f5
--- /dev/null
+++ b/trie/bintrie/trie.go
@@ -0,0 +1,353 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bintrie
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/triedb/database"
+ "github.com/holiman/uint256"
+)
+
+// errInvalidRootType is returned when the trie root has an unexpected node
+// type (e.g. an unresolved hashed node).
+var errInvalidRootType = errors.New("invalid root type")
+
+// NewBinaryNode creates a new empty binary trie
+func NewBinaryNode() BinaryNode {
+	// The empty trie is represented by the Empty sentinel node.
+	return Empty{}
+}
+
+// BinaryTrie is the implementation of https://eips.ethereum.org/EIPS/eip-7864.
+type BinaryTrie struct {
+	root   BinaryNode           // in-memory root node of the trie
+	reader *trie.Reader         // node reader backed by the database
+	tracer *trie.PrevalueTracer // records resolved node blobs so Commit can report previous values
+}
+
+// ToDot converts the binary trie to a DOT language representation. Useful for debugging.
+func (t *BinaryTrie) ToDot() string {
+	// Compute the commitments first, so the dump can include node hashes.
+	t.root.Hash()
+	return ToDot(t.root)
+}
+
+// NewBinaryTrie creates a new binary trie, opening a node reader for the
+// given root and resolving the root node from the database if it is non-empty.
+func NewBinaryTrie(root common.Hash, db database.NodeDatabase) (*BinaryTrie, error) {
+	reader, err := trie.NewReader(root, common.Hash{}, db)
+	if err != nil {
+		return nil, err
+	}
+	t := &BinaryTrie{
+		root:   NewBinaryNode(),
+		reader: reader,
+		tracer: trie.NewPrevalueTracer(),
+	}
+	// Parse the root node if it's not empty
+	if root != types.EmptyBinaryHash && root != types.EmptyRootHash {
+		// Resolve the root blob (path nil) and deserialize it at depth 0.
+		blob, err := t.nodeResolver(nil, root)
+		if err != nil {
+			return nil, err
+		}
+		node, err := DeserializeNode(blob, 0)
+		if err != nil {
+			return nil, err
+		}
+		t.root = node
+	}
+	return t, nil
+}
+
+// nodeResolver is a node resolver that reads nodes from the flatdb.
+// Every successfully resolved blob is also recorded in the tracer, so that
+// Commit can attach previous values to the generated node set.
+func (t *BinaryTrie) nodeResolver(path []byte, hash common.Hash) ([]byte, error) {
+	// empty nodes will be serialized as common.Hash{}, so capture
+	// this special use case.
+	if hash == (common.Hash{}) {
+		return nil, nil // empty node
+	}
+	blob, err := t.reader.Node(path, hash)
+	if err != nil {
+		return nil, err
+	}
+	t.tracer.Put(path, blob)
+	return blob, nil
+}
+
+// GetKey returns the sha3 preimage of a hashed key that was previously used
+// to store a value.
+func (t *BinaryTrie) GetKey(key []byte) []byte {
+	// Binary-trie keys are not hashed here, so every key is its own preimage.
+	return key
+}
+
+// GetWithHashedKey returns the value, assuming that the key has already
+// been hashed.
+func (t *BinaryTrie) GetWithHashedKey(key []byte) ([]byte, error) {
+	// Delegate to the root node, resolving missing nodes on demand.
+	return t.root.Get(key, t.nodeResolver)
+}
+
+// GetAccount returns the account information for the given address, or nil
+// (with a nil error) if the account does not exist in the trie.
+func (t *BinaryTrie) GetAccount(addr common.Address) (*types.StateAccount, error) {
+	var (
+		values [][]byte
+		err    error
+		acc    = &types.StateAccount{}
+		key    = GetBinaryTreeKey(addr, zero[:])
+	)
+	switch r := t.root.(type) {
+	case *InternalNode:
+		values, err = r.GetValuesAtStem(key[:31], t.nodeResolver)
+	case *StemNode:
+		// NOTE(review): a stem-node root returns its value group without
+		// checking that its stem matches the account key — confirm this is
+		// only reachable when the trie holds a single stem.
+		values = r.Values
+	case Empty:
+		return nil, nil
+	default:
+		// This will cover HashedNode but that should be fine since the
+		// root node should always be resolved.
+		return nil, errInvalidRootType
+	}
+	if err != nil {
+		return nil, fmt.Errorf("GetAccount (%x) error: %v", addr, err)
+	}
+
+	// The following code is required for the MPT->Binary conversion.
+	// An account can be partially migrated, where storage slots were moved to the binary
+	// but not yet the account. This means some account information as (header) storage slots
+	// are in the binary trie but basic account information must be read in the base tree (MPT).
+	// TODO: we can simplify this logic depending if the conversion is in progress or finished.
+	emptyAccount := true
+	for i := 0; values != nil && i <= CodeHashLeafKey && emptyAccount; i++ {
+		emptyAccount = emptyAccount && values[i] == nil
+	}
+	if emptyAccount {
+		return nil, nil
+	}
+
+	// If the account has been deleted, then values[10] will be 0 and not nil. If it has
+	// been recreated after that, then its code keccak will NOT be 0. So return `nil` if
+	// the nonce, and values[10], and code keccak is 0.
+	if bytes.Equal(values[BasicDataLeafKey], zero[:]) && len(values) > 10 && len(values[10]) > 0 && bytes.Equal(values[CodeHashLeafKey], zero[:]) {
+		return nil, nil
+	}
+
+	// Decode the basic-data leaf: nonce and 16-byte balance live at fixed
+	// offsets within the 32-byte value.
+	acc.Nonce = binary.BigEndian.Uint64(values[BasicDataLeafKey][BasicDataNonceOffset:])
+	var balance [16]byte
+	copy(balance[:], values[BasicDataLeafKey][BasicDataBalanceOffset:])
+	acc.Balance = new(uint256.Int).SetBytes(balance[:])
+	acc.CodeHash = values[CodeHashLeafKey]
+
+	return acc, nil
+}
+
+// GetStorage returns the value for key stored in the trie. The value bytes must
+// not be modified by the caller. If a node was not found in the database, a
+// trie.MissingNodeError is returned.
+func (t *BinaryTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) {
+	// The storage slot is addressed by the tree key derived from the account
+	// address and the raw slot key.
+	return t.root.Get(GetBinaryTreeKey(addr, key), t.nodeResolver)
+}
+
+// UpdateAccount updates the account information for the given address,
+// writing the basic-data and code-hash leaves of the account's stem.
+func (t *BinaryTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error {
+	var (
+		err       error
+		basicData [32]byte
+		values    = make([][]byte, NodeWidth)
+		stem      = GetBinaryTreeKey(addr, zero[:])
+	)
+	// The code size field is narrower than a uint32; writing the big-endian
+	// uint32 one byte before the offset keeps its low-order bytes in place
+	// (assumes codeLen fits the field — TODO confirm the field width).
+	binary.BigEndian.PutUint32(basicData[BasicDataCodeSizeOffset-1:], uint32(codeLen))
+	binary.BigEndian.PutUint64(basicData[BasicDataNonceOffset:], acc.Nonce)
+
+	// Because the balance is a max of 16 bytes, truncate
+	// the extra values. This happens in devmode, where
+	// 0xff**32 is allocated to the developer account.
+	balanceBytes := acc.Balance.Bytes()
+	// TODO: reduce the size of the allocation in devmode, then panic instead
+	// of truncating.
+	if len(balanceBytes) > 16 {
+		// Keep the low-order bytes, dropping the high-order overflow.
+		balanceBytes = balanceBytes[16:]
+	}
+	copy(basicData[32-len(balanceBytes):], balanceBytes[:])
+	values[BasicDataLeafKey] = basicData[:]
+	values[CodeHashLeafKey] = acc.CodeHash[:]
+
+	t.root, err = t.root.InsertValuesAtStem(stem, values, t.nodeResolver, 0)
+	return err
+}
+
+// UpdateStem updates the values for the given stem key.
+func (t *BinaryTrie) UpdateStem(key []byte, values [][]byte) error {
+	var err error
+	// The returned node is installed as the new root even when an error is
+	// reported, matching InsertValuesAtStem's return convention.
+	t.root, err = t.root.InsertValuesAtStem(key, values, t.nodeResolver, 0)
+	return err
+}
+
+// UpdateStorage associates key with value in the trie. If value has length zero, any
+// existing value is deleted from the trie. The value bytes must not be modified
+// by the caller while they are stored in the trie. If a node was not found in the
+// database, a trie.MissingNodeError is returned.
+func (t *BinaryTrie) UpdateStorage(address common.Address, key, value []byte) error {
+	// Normalize the value to exactly 32 bytes: truncate longer values,
+	// left-pad shorter ones with zeroes.
+	if len(value) > 32 {
+		value = value[:32]
+	}
+	var padded [32]byte
+	copy(padded[32-len(value):], value)
+
+	newRoot, err := t.root.Insert(GetBinaryTreeKeyStorageSlot(address, key), padded[:], t.nodeResolver, 0)
+	if err != nil {
+		return fmt.Errorf("UpdateStorage (%x) error: %v", address, err)
+	}
+	t.root = newRoot
+	return nil
+}
+
+// DeleteAccount is a no-op as it is disabled in stateless.
+func (t *BinaryTrie) DeleteAccount(addr common.Address) error {
+	// Intentionally does nothing: accounts are never removed from the trie.
+	return nil
+}
+
+// DeleteStorage removes any existing value for key from the trie. If a node was not
+// found in the database, a trie.MissingNodeError is returned.
+func (t *BinaryTrie) DeleteStorage(addr common.Address, key []byte) error {
+	// Deletion is expressed as overwriting the slot with the all-zero value.
+	var empty [32]byte
+	newRoot, err := t.root.Insert(GetBinaryTreeKey(addr, key), empty[:], t.nodeResolver, 0)
+	if err != nil {
+		return fmt.Errorf("DeleteStorage (%x) error: %v", addr, err)
+	}
+	t.root = newRoot
+	return nil
+}
+
+// Hash returns the root hash of the trie. It does not write to the database and
+// can be used even if the trie doesn't have one.
+func (t *BinaryTrie) Hash() common.Hash {
+	// Delegates the commitment computation to the node tree itself.
+	return t.root.Hash()
+}
+
+// Commit writes all nodes to the trie's memory database, tracking the internal
+// and external (for account tries) references.
+func (t *BinaryTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet) {
+	// NOTE(review): this assumes the root is an internal node and will panic
+	// for an Empty or StemNode root (e.g. committing an empty trie) — confirm
+	// callers never commit such a trie.
+	root := t.root.(*InternalNode)
+	nodeset := trienode.NewNodeSet(common.Hash{})
+
+	err := root.CollectNodes(nil, func(path []byte, node BinaryNode) {
+		// Store each node keyed by its path, alongside the previous blob
+		// recorded by the tracer during resolution.
+		serialized := SerializeNode(node)
+		nodeset.AddNode(path, trienode.NewNodeWithPrev(common.Hash{}, serialized, t.tracer.Get(path)))
+	})
+	if err != nil {
+		panic(fmt.Errorf("CollectNodes failed: %v", err))
+	}
+	// Serialize root commitment form
+	return t.Hash(), nodeset
+}
+
+// NodeIterator returns an iterator that returns nodes of the trie. Iteration
+// starts at the key after the given start key.
+func (t *BinaryTrie) NodeIterator(startKey []byte) (trie.NodeIterator, error) {
+	// NOTE(review): startKey is currently ignored; iteration always starts
+	// at the beginning of the trie — confirm whether this is intentional.
+	return newBinaryNodeIterator(t, nil)
+}
+
+// Prove constructs a Merkle proof for key. The result contains all encoded nodes
+// on the path to the value at key. The value itself is also included in the last
+// node and can be retrieved by verifying the proof.
+//
+// If the trie does not contain a value for key, the returned proof contains all
+// nodes of the longest existing prefix of the key (at least the root), ending
+// with the node that proves the absence of the key.
+func (t *BinaryTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
+	// Proof generation is not yet supported for the binary trie.
+	panic("not implemented")
+}
+
+// Copy creates a deep copy of the trie.
+func (t *BinaryTrie) Copy() *BinaryTrie {
+	return &BinaryTrie{
+		root:   t.root.Copy(),
+		reader: t.reader, // the node reader is shared, not copied
+		tracer: t.tracer.Copy(),
+	}
+}
+
+// IsVerkle returns true if the trie is a Verkle tree. It always returns true
+// for the binary trie.
+func (t *BinaryTrie) IsVerkle() bool {
+	// TODO @gballet This is technically NOT a verkle tree, but it has the same
+	// behavior and basic structure, so for all intents and purposes, it can be
+	// treated as such. Rename this when verkle gets removed.
+	return true
+}
+
+// UpdateContractCode updates the contract code into the trie.
+//
+// Note: the basic data leaf needs to have been previously created for this to work
+//
+// NOTE(review): the codeHash parameter is currently unused — confirm whether
+// it should be written alongside the chunks.
+func (t *BinaryTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error {
+	var (
+		chunks = trie.ChunkifyCode(code)
+		values [][]byte
+		key    []byte
+		err    error
+	)
+	// Walk the 32-byte code chunks, grouping them into stems of NodeWidth
+	// values. Code chunks start at leaf index 128 within the header group.
+	for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 {
+		groupOffset := (chunknr + 128) % 256
+		if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ {
+			values = make([][]byte, NodeWidth)
+			var offset [32]byte
+			binary.LittleEndian.PutUint64(offset[24:], chunknr+128)
+			key = GetBinaryTreeKey(addr, offset[:])
+		}
+		values[groupOffset] = chunks[i : i+32]
+
+		// Flush when the group is full or this was the last chunk.
+		if groupOffset == 255 || len(chunks)-i <= 32 {
+			err = t.UpdateStem(key[:31], values)
+
+			if err != nil {
+				return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err)
+			}
+		}
+	}
+	return nil
+}
+
+// PrefetchAccount attempts to resolve specific accounts from the database
+// to accelerate subsequent trie operations.
+func (t *BinaryTrie) PrefetchAccount(addresses []common.Address) error {
+	// Reading each account pulls the nodes on its path into the trie.
+	for i := range addresses {
+		if _, err := t.GetAccount(addresses[i]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// PrefetchStorage attempts to resolve specific storage slots from the database
+// to accelerate subsequent trie operations.
+func (t *BinaryTrie) PrefetchStorage(addr common.Address, keys [][]byte) error {
+	// Reading each slot pulls the nodes on its path into the trie.
+	for i := range keys {
+		if _, err := t.GetStorage(addr, keys[i]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Witness returns a set containing all trie nodes that have been accessed.
+func (t *BinaryTrie) Witness() map[string][]byte {
+	// Witness collection is not yet supported for the binary trie.
+	panic("not implemented")
+}
diff --git a/trie/bintrie/trie_test.go b/trie/bintrie/trie_test.go
new file mode 100644
index 0000000000..84f7689549
--- /dev/null
+++ b/trie/bintrie/trie_test.go
@@ -0,0 +1,197 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bintrie
+
+import (
+ "bytes"
+ "encoding/binary"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// Fixed 32-byte keys and values reused across the trie tests below.
+var (
+	zeroKey  = [32]byte{}
+	oneKey   = common.HexToHash("0101010101010101010101010101010101010101010101010101010101010101")
+	twoKey   = common.HexToHash("0202020202020202020202020202020202020202020202020202020202020202")
+	threeKey = common.HexToHash("0303030303030303030303030303030303030303030303030303030303030303")
+	fourKey  = common.HexToHash("0404040404040404040404040404040404040404040404040404040404040404")
+	ffKey    = common.HexToHash("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+)
+
+// TestSingleEntry checks the height and root hash of a tree holding a single leaf.
+func TestSingleEntry(t *testing.T) {
+	tree := NewBinaryNode()
+	tree, err := tree.Insert(zeroKey[:], oneKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// A lone stem node makes a tree of height 1.
+	if tree.GetHeight() != 1 {
+		t.Fatal("invalid depth")
+	}
+	expected := common.HexToHash("aab1060e04cb4f5dc6f697ae93156a95714debbf77d54238766adc5709282b6f")
+	got := tree.Hash()
+	if got != expected {
+		t.Fatalf("invalid tree root, got %x, want %x", got, expected)
+	}
+}
+
+// TestTwoEntriesDiffFirstBit checks that two keys differing in their first bit
+// split at the root, producing a tree of height 2 with the expected root hash.
+func TestTwoEntriesDiffFirstBit(t *testing.T) {
+	var err error
+	tree := NewBinaryNode()
+	tree, err = tree.Insert(zeroKey[:], oneKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tree, err = tree.Insert(common.HexToHash("8000000000000000000000000000000000000000000000000000000000000000").Bytes(), twoKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tree.GetHeight() != 2 {
+		t.Fatal("invalid height")
+	}
+	if tree.Hash() != common.HexToHash("dfc69c94013a8b3c65395625a719a87534a7cfd38719251ad8c8ea7fe79f065e") {
+		t.Fatal("invalid tree root")
+	}
+}
+
+// TestOneStemColocatedValues checks that keys sharing a stem (differing only
+// in the last byte) are co-located in a single stem node of height 1.
+func TestOneStemColocatedValues(t *testing.T) {
+	var err error
+	tree := NewBinaryNode()
+	tree, err = tree.Insert(common.HexToHash("0000000000000000000000000000000000000000000000000000000000000003").Bytes(), oneKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tree, err = tree.Insert(common.HexToHash("0000000000000000000000000000000000000000000000000000000000000004").Bytes(), twoKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tree, err = tree.Insert(common.HexToHash("0000000000000000000000000000000000000000000000000000000000000009").Bytes(), threeKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tree, err = tree.Insert(common.HexToHash("00000000000000000000000000000000000000000000000000000000000000FF").Bytes(), fourKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tree.GetHeight() != 1 {
+		t.Fatal("invalid height")
+	}
+}
+
+// TestTwoStemColocatedValues checks two groups of co-located values on two
+// stems that diverge at the first bit, yielding a tree of height 2.
+func TestTwoStemColocatedValues(t *testing.T) {
+	var err error
+	tree := NewBinaryNode()
+	// stem: 0...0
+	tree, err = tree.Insert(common.HexToHash("0000000000000000000000000000000000000000000000000000000000000003").Bytes(), oneKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tree, err = tree.Insert(common.HexToHash("0000000000000000000000000000000000000000000000000000000000000004").Bytes(), twoKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// stem: 10...0
+	tree, err = tree.Insert(common.HexToHash("8000000000000000000000000000000000000000000000000000000000000003").Bytes(), oneKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tree, err = tree.Insert(common.HexToHash("8000000000000000000000000000000000000000000000000000000000000004").Bytes(), twoKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tree.GetHeight() != 2 {
+		t.Fatal("invalid height")
+	}
+}
+
+// TestTwoKeysMatchFirst42Bits checks that two keys sharing a 42-bit prefix
+// produce a chain of 42+1 internal nodes above the leaves.
+func TestTwoKeysMatchFirst42Bits(t *testing.T) {
+	var err error
+	tree := NewBinaryNode()
+	// key1 and key 2 have the same prefix of 42 bits (b0*42+b1+b1) and differ after.
+	key1 := common.HexToHash("0000000000C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0").Bytes()
+	key2 := common.HexToHash("0000000000E00000000000000000000000000000000000000000000000000000").Bytes()
+	tree, err = tree.Insert(key1, oneKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tree, err = tree.Insert(key2, twoKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// 1 leaf level + 42 shared-prefix levels + 1 split level.
+	if tree.GetHeight() != 1+42+1 {
+		t.Fatal("invalid height")
+	}
+}
+// TestInsertDuplicateKey checks that re-inserting an existing key overwrites
+// its value in place without changing the tree shape.
+func TestInsertDuplicateKey(t *testing.T) {
+	var err error
+	tree := NewBinaryNode()
+	tree, err = tree.Insert(oneKey[:], oneKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tree, err = tree.Insert(oneKey[:], twoKey[:], nil, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tree.GetHeight() != 1 {
+		t.Fatal("invalid height")
+	}
+	// Verify that the value is updated; oneKey's last byte is 0x01, so the
+	// value lives at index 1 of the stem node's value group.
+	if !bytes.Equal(tree.(*StemNode).Values[1], twoKey[:]) {
+		t.Fatal("value not updated")
+	}
+}
+// TestLargeNumberOfEntries inserts 256 keys that differ only in their first
+// byte, which fully populates 8 levels of internal nodes above the leaves.
+func TestLargeNumberOfEntries(t *testing.T) {
+	var err error
+	tree := NewBinaryNode()
+	for i := range 256 {
+		var key [32]byte
+		key[0] = byte(i)
+		tree, err = tree.Insert(key[:], ffKey[:], nil, 0)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	height := tree.GetHeight()
+	if height != 1+8 {
+		t.Fatalf("invalid height, wanted %d, got %d", 1+8, height)
+	}
+}
+
+// TestMerkleizeMultipleEntries checks the root hash of a tree holding four
+// leaves spread over both halves of the key space.
+func TestMerkleizeMultipleEntries(t *testing.T) {
+	var err error
+	tree := NewBinaryNode()
+	keys := [][]byte{
+		zeroKey[:],
+		common.HexToHash("8000000000000000000000000000000000000000000000000000000000000000").Bytes(),
+		common.HexToHash("0100000000000000000000000000000000000000000000000000000000000000").Bytes(),
+		common.HexToHash("8100000000000000000000000000000000000000000000000000000000000000").Bytes(),
+	}
+	for i, key := range keys {
+		// Use the key's index as a distinct little-endian value.
+		var v [32]byte
+		binary.LittleEndian.PutUint64(v[:8], uint64(i))
+		tree, err = tree.Insert(key, v[:], nil, 0)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	got := tree.Hash()
+	expected := common.HexToHash("9317155862f7a3867660ddd0966ff799a3d16aa4df1e70a7516eaa4a675191b5")
+	if got != expected {
+		t.Fatalf("invalid root, expected=%x, got = %x", expected, got)
+	}
+}
diff --git a/trie/committer.go b/trie/committer.go
index 0939a07abb..2a2142e0ff 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -29,12 +29,12 @@ import (
// insertion order.
type committer struct {
nodes *trienode.NodeSet
- tracer *tracer
+ tracer *PrevalueTracer
collectLeaf bool
}
// newCommitter creates a new committer or picks one from the pool.
-func newCommitter(nodeset *trienode.NodeSet, tracer *tracer, collectLeaf bool) *committer {
+func newCommitter(nodeset *trienode.NodeSet, tracer *PrevalueTracer, collectLeaf bool) *committer {
return &committer{
nodes: nodeset,
tracer: tracer,
@@ -110,14 +110,16 @@ func (c *committer) commitChildren(path []byte, n *fullNode, parallel bool) {
} else {
wg.Add(1)
go func(index int) {
+ defer wg.Done()
+
p := append(path, byte(index))
childSet := trienode.NewNodeSet(c.nodes.Owner)
childCommitter := newCommitter(childSet, c.tracer, c.collectLeaf)
n.Children[index] = childCommitter.commit(p, child, false)
+
nodesMu.Lock()
- c.nodes.MergeSet(childSet)
+ c.nodes.MergeDisjoint(childSet)
nodesMu.Unlock()
- wg.Done()
}(i)
}
}
@@ -140,15 +142,15 @@ func (c *committer) store(path []byte, n node) node {
// The node is embedded in its parent, in other words, this node
// will not be stored in the database independently, mark it as
// deleted only if the node was existent in database before.
- _, ok := c.tracer.accessList[string(path)]
- if ok {
- c.nodes.AddNode(path, trienode.NewDeleted())
+ origin := c.tracer.Get(path)
+ if len(origin) != 0 {
+ c.nodes.AddNode(path, trienode.NewDeletedWithPrev(origin))
}
return n
}
// Collect the dirty node to nodeset for return.
nhash := common.BytesToHash(hash)
- c.nodes.AddNode(path, trienode.New(nhash, nodeToBytes(n)))
+ c.nodes.AddNode(path, trienode.NewNodeWithPrev(nhash, nodeToBytes(n), c.tracer.Get(path)))
// Collect the corresponding leaf node if it's required. We don't check
// full node since it's impossible to store value in fullNode. The key
diff --git a/trie/iterator.go b/trie/iterator.go
index e6fedf2430..80298ce48f 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -405,7 +405,7 @@ func (it *nodeIterator) resolveHash(hash hashNode, path []byte) (node, error) {
// loaded blob will be tracked, while it's not required here since
// all loaded nodes won't be linked to trie at all and track nodes
// may lead to out-of-memory issue.
- blob, err := it.trie.reader.node(path, common.BytesToHash(hash))
+ blob, err := it.trie.reader.Node(path, common.BytesToHash(hash))
if err != nil {
return nil, err
}
@@ -426,7 +426,7 @@ func (it *nodeIterator) resolveBlob(hash hashNode, path []byte) ([]byte, error)
// loaded blob will be tracked, while it's not required here since
// all loaded nodes won't be linked to trie at all and track nodes
// may lead to out-of-memory issue.
- return it.trie.reader.node(path, common.BytesToHash(hash))
+ return it.trie.reader.Node(path, common.BytesToHash(hash))
}
func (st *nodeIteratorState) resolve(it *nodeIterator, path []byte) error {
diff --git a/trie/proof.go b/trie/proof.go
index 53b7acc30c..1a06ed5d5e 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -69,7 +69,7 @@ func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
// loaded blob will be tracked, while it's not required here since
// all loaded nodes won't be linked to trie at all and track nodes
// may lead to out-of-memory issue.
- blob, err := t.reader.node(prefix, common.BytesToHash(n))
+ blob, err := t.reader.Node(prefix, common.BytesToHash(n))
if err != nil {
log.Error("Unhandled trie error in Trie.Prove", "err", err)
return err
@@ -567,7 +567,12 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, valu
}
// Rebuild the trie with the leaf stream, the shape of trie
// should be same with the original one.
- tr := &Trie{root: root, reader: newEmptyReader(), tracer: newTracer()}
+ tr := &Trie{
+ root: root,
+ reader: newEmptyReader(),
+ opTracer: newOpTracer(),
+ prevalueTracer: NewPrevalueTracer(),
+ }
if empty {
tr.root = nil
}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 0424ecb6e5..7c7bd184bf 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -105,19 +105,6 @@ func (t *StateTrie) MustGet(key []byte) []byte {
return t.trie.MustGet(crypto.Keccak256(key))
}
-// GetStorage attempts to retrieve a storage slot with provided account address
-// and slot key. The value bytes must not be modified by the caller.
-// If the specified storage slot is not in the trie, nil will be returned.
-// If a trie node is not found in the database, a MissingNodeError is returned.
-func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
- enc, err := t.trie.Get(crypto.Keccak256(key))
- if err != nil || len(enc) == 0 {
- return nil, err
- }
- _, content, _, err := rlp.Split(enc)
- return content, err
-}
-
// GetAccount attempts to retrieve an account with provided account address.
// If the specified account is not in the trie, nil will be returned.
// If a trie node is not found in the database, a MissingNodeError is returned.
@@ -144,6 +131,39 @@ func (t *StateTrie) GetAccountByHash(addrHash common.Hash) (*types.StateAccount,
return ret, err
}
+// PrefetchAccount attempts to resolve specific accounts from the database
+// to accelerate subsequent trie operations.
+func (t *StateTrie) PrefetchAccount(addresses []common.Address) error {
+ var keys [][]byte
+ for _, addr := range addresses {
+ keys = append(keys, crypto.Keccak256(addr.Bytes()))
+ }
+ return t.trie.Prefetch(keys)
+}
+
+// GetStorage attempts to retrieve a storage slot with provided account address
+// and slot key. The value bytes must not be modified by the caller.
+// If the specified storage slot is not in the trie, nil will be returned.
+// If a trie node is not found in the database, a MissingNodeError is returned.
+func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
+ enc, err := t.trie.Get(crypto.Keccak256(key))
+ if err != nil || len(enc) == 0 {
+ return nil, err
+ }
+ _, content, _, err := rlp.Split(enc)
+ return content, err
+}
+
+// PrefetchStorage attempts to resolve specific storage slots from the database
+// to accelerate subsequent trie operations.
+func (t *StateTrie) PrefetchStorage(_ common.Address, keys [][]byte) error {
+ var keylist [][]byte
+ for _, key := range keys {
+ keylist = append(keylist, crypto.Keccak256(key))
+ }
+ return t.trie.Prefetch(keylist)
+}
+
// GetNode attempts to retrieve a trie node by compact-encoded path. It is not
// possible to use keybyte-encoding as the path might contain odd nibbles.
// If the specified trie node is not in the trie, nil will be returned.
@@ -253,7 +273,7 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte {
}
// Witness returns a set containing all trie nodes that have been accessed.
-func (t *StateTrie) Witness() map[string]struct{} {
+func (t *StateTrie) Witness() map[string][]byte {
return t.trie.Witness()
}
diff --git a/trie/tracer.go b/trie/tracer.go
index b628d4c991..04122d1384 100644
--- a/trie/tracer.go
+++ b/trie/tracer.go
@@ -18,14 +18,13 @@ package trie
import (
"maps"
-
- "github.com/ethereum/go-ethereum/common"
+ "sync"
)
-// tracer tracks the changes of trie nodes. During the trie operations,
+// opTracer tracks the changes of trie nodes. During the trie operations,
// some nodes can be deleted from the trie, while these deleted nodes
// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted
-// nodes won't be removed from the disk at all. Tracer is an auxiliary tool
+// nodes won't be removed from the disk at all. opTracer is an auxiliary tool
// used to track all insert and delete operations of trie and capture all
// deleted nodes eventually.
//
@@ -35,38 +34,25 @@ import (
// This tool can track all of them no matter the node is embedded in its
// parent or not, but valueNode is never tracked.
//
-// Besides, it's also used for recording the original value of the nodes
-// when they are resolved from the disk. The pre-value of the nodes will
-// be used to construct trie history in the future.
-//
-// Note tracer is not thread-safe, callers should be responsible for handling
+// Note opTracer is not thread-safe, callers should be responsible for handling
// the concurrency issues by themselves.
-type tracer struct {
- inserts map[string]struct{}
- deletes map[string]struct{}
- accessList map[string][]byte
+type opTracer struct {
+ inserts map[string]struct{}
+ deletes map[string]struct{}
}
-// newTracer initializes the tracer for capturing trie changes.
-func newTracer() *tracer {
- return &tracer{
- inserts: make(map[string]struct{}),
- deletes: make(map[string]struct{}),
- accessList: make(map[string][]byte),
+// newOpTracer initializes the tracer for capturing trie changes.
+func newOpTracer() *opTracer {
+ return &opTracer{
+ inserts: make(map[string]struct{}),
+ deletes: make(map[string]struct{}),
}
}
-// onRead tracks the newly loaded trie node and caches the rlp-encoded
-// blob internally. Don't change the value outside of function since
-// it's not deep-copied.
-func (t *tracer) onRead(path []byte, val []byte) {
- t.accessList[string(path)] = val
-}
-
// onInsert tracks the newly inserted trie node. If it's already
// in the deletion set (resurrected node), then just wipe it from
// the deletion set as it's "untouched".
-func (t *tracer) onInsert(path []byte) {
+func (t *opTracer) onInsert(path []byte) {
if _, present := t.deletes[string(path)]; present {
delete(t.deletes, string(path))
return
@@ -77,7 +63,7 @@ func (t *tracer) onInsert(path []byte) {
// onDelete tracks the newly deleted trie node. If it's already
// in the addition set, then just wipe it from the addition set
// as it's untouched.
-func (t *tracer) onDelete(path []byte) {
+func (t *opTracer) onDelete(path []byte) {
if _, present := t.inserts[string(path)]; present {
delete(t.inserts, string(path))
return
@@ -86,37 +72,101 @@ func (t *tracer) onDelete(path []byte) {
}
// reset clears the content tracked by tracer.
-func (t *tracer) reset() {
- t.inserts = make(map[string]struct{})
- t.deletes = make(map[string]struct{})
- t.accessList = make(map[string][]byte)
+func (t *opTracer) reset() {
+ clear(t.inserts)
+ clear(t.deletes)
}
-// copy returns a deep copied tracer instanceproto.
-func (t *tracer) copy() *tracer {
- accessList := make(map[string][]byte, len(t.accessList))
- for path, blob := range t.accessList {
- accessList[path] = common.CopyBytes(blob)
- }
- return &tracer{
- inserts: maps.Clone(t.inserts),
- deletes: maps.Clone(t.deletes),
- accessList: accessList,
+// copy returns a deep copied tracer instance.
+func (t *opTracer) copy() *opTracer {
+ return &opTracer{
+ inserts: maps.Clone(t.inserts),
+ deletes: maps.Clone(t.deletes),
}
}
-// deletedNodes returns a list of node paths which are deleted from the trie.
-func (t *tracer) deletedNodes() []string {
- var paths []string
+// deletedList returns a list of node paths which are deleted from the trie.
+func (t *opTracer) deletedList() [][]byte {
+ paths := make([][]byte, 0, len(t.deletes))
for path := range t.deletes {
- // It's possible a few deleted nodes were embedded
- // in their parent before, the deletions can be no
- // effect by deleting nothing, filter them out.
- _, ok := t.accessList[path]
- if !ok {
- continue
- }
- paths = append(paths, path)
+ paths = append(paths, []byte(path))
}
return paths
}
+
+// PrevalueTracer tracks the original values of resolved trie nodes. Cached trie
+// node values are expected to be immutable. A zero-size node value is treated as
+// non-existent and should not occur in practice.
+//
+// Note PrevalueTracer is thread-safe.
+type PrevalueTracer struct {
+ data map[string][]byte
+ lock sync.RWMutex
+}
+
+// NewPrevalueTracer initializes the tracer for capturing resolved trie nodes.
+func NewPrevalueTracer() *PrevalueTracer {
+ return &PrevalueTracer{
+ data: make(map[string][]byte),
+ }
+}
+
+// Put tracks the newly loaded trie node and caches its RLP-encoded
+// blob internally. Do not modify the value outside this function,
+// as it is not deep-copied.
+func (t *PrevalueTracer) Put(path []byte, val []byte) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ t.data[string(path)] = val
+}
+
+// Get returns the cached trie node value. If the node is not found, nil will
+// be returned.
+func (t *PrevalueTracer) Get(path []byte) []byte {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return t.data[string(path)]
+}
+
+// HasList returns a list of flags indicating whether the corresponding trie nodes
+// specified by the paths exist in the trie.
+func (t *PrevalueTracer) HasList(list [][]byte) []bool {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ exists := make([]bool, 0, len(list))
+ for _, path := range list {
+ _, ok := t.data[string(path)]
+ exists = append(exists, ok)
+ }
+ return exists
+}
+
+// Values returns a list of values of the cached trie nodes.
+func (t *PrevalueTracer) Values() map[string][]byte {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return maps.Clone(t.data)
+}
+
+// Reset resets the cached content in the PrevalueTracer.
+func (t *PrevalueTracer) Reset() {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ clear(t.data)
+}
+
+// Copy returns a copied PrevalueTracer instance.
+func (t *PrevalueTracer) Copy() *PrevalueTracer {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ // Shadow clone is used, as the cached trie node values are immutable
+ return &PrevalueTracer{
+ data: maps.Clone(t.data),
+ }
+}
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
index 852a706021..695570fd0d 100644
--- a/trie/tracer_test.go
+++ b/trie/tracer_test.go
@@ -52,15 +52,15 @@ var (
}
)
-func TestTrieTracer(t *testing.T) {
- testTrieTracer(t, tiny)
- testTrieTracer(t, nonAligned)
- testTrieTracer(t, standard)
+func TestTrieOpTracer(t *testing.T) {
+ testTrieOpTracer(t, tiny)
+ testTrieOpTracer(t, nonAligned)
+ testTrieOpTracer(t, standard)
}
// Tests if the trie diffs are tracked correctly. Tracer should capture
// all non-leaf dirty nodes, no matter the node is embedded or not.
-func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
+func testTrieOpTracer(t *testing.T, vals []struct{ k, v string }) {
db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db)
@@ -68,8 +68,8 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
for _, val := range vals {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
- insertSet := copySet(trie.tracer.inserts) // copy before commit
- deleteSet := copySet(trie.tracer.deletes) // copy before commit
+ insertSet := copySet(trie.opTracer.inserts) // copy before commit
+ deleteSet := copySet(trie.opTracer.deletes) // copy before commit
root, nodes := trie.Commit(false)
db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
@@ -86,7 +86,7 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
for _, val := range vals {
trie.MustDelete([]byte(val.k))
}
- insertSet, deleteSet = copySet(trie.tracer.inserts), copySet(trie.tracer.deletes)
+ insertSet, deleteSet = copySet(trie.opTracer.inserts), copySet(trie.opTracer.deletes)
if !compareSet(insertSet, nil) {
t.Fatal("Unexpected insertion set")
}
@@ -97,13 +97,13 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
// Test that after inserting a new batch of nodes and deleting them immediately,
// the trie tracer should be cleared normally as no operation happened.
-func TestTrieTracerNoop(t *testing.T) {
- testTrieTracerNoop(t, tiny)
- testTrieTracerNoop(t, nonAligned)
- testTrieTracerNoop(t, standard)
+func TestTrieOpTracerNoop(t *testing.T) {
+ testTrieOpTracerNoop(t, tiny)
+ testTrieOpTracerNoop(t, nonAligned)
+ testTrieOpTracerNoop(t, standard)
}
-func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) {
+func testTrieOpTracerNoop(t *testing.T, vals []struct{ k, v string }) {
db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db)
for _, val := range vals {
@@ -112,22 +112,22 @@ func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) {
for _, val := range vals {
trie.MustDelete([]byte(val.k))
}
- if len(trie.tracer.inserts) != 0 {
+ if len(trie.opTracer.inserts) != 0 {
t.Fatal("Unexpected insertion set")
}
- if len(trie.tracer.deletes) != 0 {
+ if len(trie.opTracer.deletes) != 0 {
t.Fatal("Unexpected deletion set")
}
}
-// Tests if the accessList is correctly tracked.
-func TestAccessList(t *testing.T) {
- testAccessList(t, tiny)
- testAccessList(t, nonAligned)
- testAccessList(t, standard)
+// Tests if the original value of trie nodes are correctly tracked.
+func TestPrevalueTracer(t *testing.T) {
+ testPrevalueTracer(t, tiny)
+ testPrevalueTracer(t, nonAligned)
+ testPrevalueTracer(t, standard)
}
-func testAccessList(t *testing.T, vals []struct{ k, v string }) {
+func testPrevalueTracer(t *testing.T, vals []struct{ k, v string }) {
var (
db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie = NewEmpty(db)
@@ -210,7 +210,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
}
// Tests origin values won't be tracked in Iterator or Prover
-func TestAccessListLeak(t *testing.T) {
+func TestPrevalueTracerLeak(t *testing.T) {
var (
db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie = NewEmpty(db)
@@ -249,9 +249,9 @@ func TestAccessListLeak(t *testing.T) {
}
for _, c := range cases {
trie, _ = New(TrieID(root), db)
- n1 := len(trie.tracer.accessList)
+ n1 := len(trie.prevalueTracer.data)
c.op(trie)
- n2 := len(trie.tracer.accessList)
+ n2 := len(trie.prevalueTracer.data)
if n1 != n2 {
t.Fatalf("AccessList is leaked, prev %d after %d", n1, n2)
diff --git a/trie/transition.go b/trie/transition.go
new file mode 100644
index 0000000000..da49c6cdc2
--- /dev/null
+++ b/trie/transition.go
@@ -0,0 +1,227 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-verkle"
+)
+
+// TransitionTrie is a trie that implements a façade design pattern, presenting
+// a single interface to the old MPT trie and the new verkle/binary trie. Reads
+// first from the overlay trie, and falls back to the base trie if the key isn't
+// found. All writes go to the overlay trie.
+type TransitionTrie struct {
+ overlay *VerkleTrie
+ base *SecureTrie
+ storage bool
+}
+
+// NewTransitionTrie creates a new TransitionTrie.
+func NewTransitionTrie(base *SecureTrie, overlay *VerkleTrie, st bool) *TransitionTrie {
+ return &TransitionTrie{
+ overlay: overlay,
+ base: base,
+ storage: st,
+ }
+}
+
+// Base returns the base trie.
+func (t *TransitionTrie) Base() *SecureTrie {
+ return t.base
+}
+
+// Overlay returns the overlay trie.
+func (t *TransitionTrie) Overlay() *VerkleTrie {
+ return t.overlay
+}
+
+// GetKey returns the sha3 preimage of a hashed key that was previously used
+// to store a value.
+func (t *TransitionTrie) GetKey(key []byte) []byte {
+ if key := t.overlay.GetKey(key); key != nil {
+ return key
+ }
+ return t.base.GetKey(key)
+}
+
+// GetStorage returns the value for key stored in the trie. The value bytes must
+// not be modified by the caller.
+func (t *TransitionTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) {
+ val, err := t.overlay.GetStorage(addr, key)
+ if err != nil {
+ return nil, fmt.Errorf("get storage from overlay: %s", err)
+ }
+ if len(val) != 0 {
+ return val, nil
+ }
+ // TODO also insert value into overlay
+ return t.base.GetStorage(addr, key)
+}
+
+// PrefetchStorage attempts to resolve specific storage slots from the database
+// to accelerate subsequent trie operations.
+func (t *TransitionTrie) PrefetchStorage(addr common.Address, keys [][]byte) error {
+ for _, key := range keys {
+ if _, err := t.GetStorage(addr, key); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetAccount abstracts an account read from the trie.
+func (t *TransitionTrie) GetAccount(address common.Address) (*types.StateAccount, error) {
+ data, err := t.overlay.GetAccount(address)
+ if err != nil {
+ // Post cancun, no indicator needs to be used to indicate that
+ // an account was deleted in the overlay tree. If an error is
+ // returned, then it's a genuine error, and not an indicator
+ // that a tombstone was found.
+ return nil, err
+ }
+ if data != nil {
+ return data, nil
+ }
+ return t.base.GetAccount(address)
+}
+
+// PrefetchAccount attempts to resolve specific accounts from the database
+// to accelerate subsequent trie operations.
+func (t *TransitionTrie) PrefetchAccount(addresses []common.Address) error {
+ for _, addr := range addresses {
+ if _, err := t.GetAccount(addr); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// UpdateStorage associates key with value in the trie. If value has length zero, any
+// existing value is deleted from the trie. The value bytes must not be modified
+// by the caller while they are stored in the trie.
+func (t *TransitionTrie) UpdateStorage(address common.Address, key []byte, value []byte) error {
+ var v []byte
+ if len(value) >= 32 {
+ v = value[:32]
+ } else {
+ var val [32]byte
+ copy(val[32-len(value):], value[:])
+ v = val[:]
+ }
+ return t.overlay.UpdateStorage(address, key, v)
+}
+
+// UpdateAccount abstracts an account write to the trie.
+func (t *TransitionTrie) UpdateAccount(addr common.Address, account *types.StateAccount, codeLen int) error {
+ // NOTE: before the rebase, this was saving the state root, so that OpenStorageTrie
+ // could still work during a replay. This is no longer needed, as OpenStorageTrie
+ // only needs to know what the account trie does now.
+ return t.overlay.UpdateAccount(addr, account, codeLen)
+}
+
+// DeleteStorage removes any existing value for key from the trie. If a node was not
+// found in the database, a trie.MissingNodeError is returned.
+func (t *TransitionTrie) DeleteStorage(addr common.Address, key []byte) error {
+ return t.overlay.DeleteStorage(addr, key)
+}
+
+// DeleteAccount abstracts an account deletion from the trie.
+func (t *TransitionTrie) DeleteAccount(key common.Address) error {
+ return t.overlay.DeleteAccount(key)
+}
+
+// Hash returns the root hash of the trie. It does not write to the database and
+// can be used even if the trie doesn't have one.
+func (t *TransitionTrie) Hash() common.Hash {
+ return t.overlay.Hash()
+}
+
+// Commit collects all dirty nodes in the trie and replace them with the
+// corresponding node hash. All collected nodes(including dirty leaves if
+// collectLeaf is true) will be encapsulated into a nodeset for return.
+// The returned nodeset can be nil if the trie is clean(nothing to commit).
+// Once the trie is committed, it's not usable anymore. A new trie must
+// be created with new root and updated trie database for following usage
+func (t *TransitionTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+ // Just return if the trie is a storage trie: otherwise,
+ // the overlay trie will be committed as many times as
+ // there are storage tries. This would kill performance.
+ if t.storage {
+ return common.Hash{}, nil
+ }
+ return t.overlay.Commit(collectLeaf)
+}
+
+// NodeIterator returns an iterator that returns nodes of the trie. Iteration
+// starts at the key after the given start key.
+func (t *TransitionTrie) NodeIterator(startKey []byte) (NodeIterator, error) {
+ panic("not implemented") // TODO: Implement
+}
+
+// Prove constructs a Merkle proof for key. The result contains all encoded nodes
+// on the path to the value at key. The value itself is also included in the last
+// node and can be retrieved by verifying the proof.
+//
+// If the trie does not contain a value for key, the returned proof contains all
+// nodes of the longest existing prefix of the key (at least the root), ending
+// with the node that proves the absence of the key.
+func (t *TransitionTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
+ panic("not implemented") // TODO: Implement
+}
+
+// IsVerkle returns true if the trie is verkle-tree based
+func (t *TransitionTrie) IsVerkle() bool {
+ // For all intents and purposes, the calling code should treat this as a verkle trie
+ return true
+}
+
+// UpdateStem updates a group of values, given the stem they are using. If
+// a value already exists, it is overwritten.
+func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error {
+ trie := t.overlay
+ switch root := trie.root.(type) {
+ case *verkle.InternalNode:
+ return root.InsertValuesAtStem(key, values, t.overlay.nodeResolver)
+ default:
+ panic("invalid root type")
+ }
+}
+
+// Copy creates a deep copy of the transition trie.
+func (t *TransitionTrie) Copy() *TransitionTrie {
+ return &TransitionTrie{
+ overlay: t.overlay.Copy(),
+ base: t.base.Copy(),
+ storage: t.storage,
+ }
+}
+
+// UpdateContractCode updates the contract code for the given address.
+func (t *TransitionTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error {
+ return t.overlay.UpdateContractCode(addr, codeHash, code)
+}
+
+// Witness returns a set containing all trie nodes that have been accessed.
+func (t *TransitionTrie) Witness() map[string][]byte {
+ panic("not implemented")
+}
diff --git a/trie/trie.go b/trie/trie.go
index 0527b9b10e..fddcd548ea 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -21,12 +21,14 @@ import (
"bytes"
"errors"
"fmt"
+ "slices"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/triedb/database"
+ "golang.org/x/sync/errgroup"
)
// Trie represents a Merkle Patricia Trie. Use New to create a trie that operates
@@ -53,10 +55,11 @@ type Trie struct {
uncommitted int
// reader is the handler trie can retrieve nodes from.
- reader *trieReader
+ reader *Reader
- // tracer is the tool to track the trie changes.
- tracer *tracer
+ // Various tracers for capturing the modifications to trie
+ opTracer *opTracer
+ prevalueTracer *PrevalueTracer
}
// newFlag returns the cache flag value for a newly created node.
@@ -67,13 +70,14 @@ func (t *Trie) newFlag() nodeFlag {
// Copy returns a copy of Trie.
func (t *Trie) Copy() *Trie {
return &Trie{
- root: copyNode(t.root),
- owner: t.owner,
- committed: t.committed,
- unhashed: t.unhashed,
- uncommitted: t.uncommitted,
- reader: t.reader,
- tracer: t.tracer.copy(),
+ root: copyNode(t.root),
+ owner: t.owner,
+ committed: t.committed,
+ unhashed: t.unhashed,
+ uncommitted: t.uncommitted,
+ reader: t.reader,
+ opTracer: t.opTracer.copy(),
+ prevalueTracer: t.prevalueTracer.Copy(),
}
}
@@ -84,14 +88,15 @@ func (t *Trie) Copy() *Trie {
// empty, otherwise, the root node must be present in database or returns
// a MissingNodeError if not.
func New(id *ID, db database.NodeDatabase) (*Trie, error) {
- reader, err := newTrieReader(id.StateRoot, id.Owner, db)
+ reader, err := NewReader(id.StateRoot, id.Owner, db)
if err != nil {
return nil, err
}
trie := &Trie{
- owner: id.Owner,
- reader: reader,
- tracer: newTracer(),
+ owner: id.Owner,
+ reader: reader,
+ opTracer: newOpTracer(),
+ prevalueTracer: NewPrevalueTracer(),
}
if id.Root != (common.Hash{}) && id.Root != types.EmptyRootHash {
rootnode, err := trie.resolveAndTrack(id.Root[:], nil)
@@ -190,6 +195,51 @@ func (t *Trie) get(origNode node, key []byte, pos int) (value []byte, newnode no
}
}
+// Prefetch attempts to resolve the leaves and intermediate trie nodes
+// specified by the key list in parallel. The results are silently
+// discarded to simplify the function.
+func (t *Trie) Prefetch(keylist [][]byte) error {
+ // Short circuit if the trie is already committed and not usable.
+ if t.committed {
+ return ErrCommitted
+ }
+ // Resolve the trie nodes sequentially if there are not too many
+ // trie nodes in the trie.
+ fn, ok := t.root.(*fullNode)
+ if !ok || len(keylist) < 16 {
+ for _, key := range keylist {
+ _, err := t.Get(key)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ var (
+ keys = make(map[byte][][]byte)
+ eg errgroup.Group
+ )
+ for _, key := range keylist {
+ hkey := keybytesToHex(key)
+ keys[hkey[0]] = append(keys[hkey[0]], hkey)
+ }
+ for pos, ks := range keys {
+ eg.Go(func() error {
+ for _, k := range ks {
+ _, newnode, didResolve, err := t.get(fn.Children[pos], k, 1)
+ if err == nil && didResolve {
+ fn.Children[pos] = newnode
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+ }
+ return eg.Wait()
+}
+
// MustGetNode is a wrapper of GetNode and will omit any encountered error but
// just print out an error message.
func (t *Trie) MustGetNode(path []byte) ([]byte, int) {
@@ -239,7 +289,7 @@ func (t *Trie) getNode(origNode node, path []byte, pos int) (item []byte, newnod
if hash == nil {
return nil, origNode, 0, errors.New("non-consensus node")
}
- blob, err := t.reader.node(path, common.BytesToHash(hash))
+ blob, err := t.reader.Node(path, common.BytesToHash(hash))
return blob, origNode, 1, err
}
// Path still needs to be traversed, descend into children
@@ -361,7 +411,7 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
// New branch node is created as a child of the original short node.
// Track the newly inserted node in the tracer. The node identifier
// passed is the path from the root node.
- t.tracer.onInsert(append(prefix, key[:matchlen]...))
+ t.opTracer.onInsert(append(prefix, key[:matchlen]...))
// Replace it with a short node leading up to the branch.
return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil
@@ -379,7 +429,7 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
// New short node is created and track it in the tracer. The node identifier
// passed is the path from the root node. Note the valueNode won't be tracked
// since it's always embedded in its parent.
- t.tracer.onInsert(prefix)
+ t.opTracer.onInsert(prefix)
return true, &shortNode{key, value, t.newFlag()}, nil
@@ -444,7 +494,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// The matched short node is deleted entirely and track
// it in the deletion set. The same the valueNode doesn't
// need to be tracked at all since it's always embedded.
- t.tracer.onDelete(prefix)
+ t.opTracer.onDelete(prefix)
return true, nil, nil // remove n entirely for whole matches
}
@@ -460,7 +510,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
case *shortNode:
// The child shortNode is merged into its parent, track
// is deleted as well.
- t.tracer.onDelete(append(prefix, n.Key...))
+ t.opTracer.onDelete(append(prefix, n.Key...))
// Deleting from the subtrie reduced it to another
// short node. Merge the nodes to avoid creating a
@@ -468,7 +518,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// always creates a new slice) instead of append to
// avoid modifying n.Key since it might be shared with
// other nodes.
- return true, &shortNode{concat(n.Key, child.Key...), child.Val, t.newFlag()}, nil
+ return true, &shortNode{slices.Concat(n.Key, child.Key), child.Val, t.newFlag()}, nil
default:
return true, &shortNode{n.Key, child, t.newFlag()}, nil
}
@@ -525,7 +575,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
// Replace the entire full node with the short node.
// Mark the original short node as deleted since the
// value is embedded into the parent now.
- t.tracer.onDelete(append(prefix, byte(pos)))
+ t.opTracer.onDelete(append(prefix, byte(pos)))
k := append([]byte{byte(pos)}, cnode.Key...)
return true, &shortNode{k, cnode.Val, t.newFlag()}, nil
@@ -563,13 +613,6 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) {
}
}
-func concat(s1 []byte, s2 ...byte) []byte {
- r := make([]byte, len(s1)+len(s2))
- copy(r, s1)
- copy(r[len(s1):], s2)
- return r
-}
-
// copyNode deep-copies the supplied node along with its children recursively.
func copyNode(n node) node {
switch n := (n).(type) {
@@ -612,17 +655,35 @@ func (t *Trie) resolve(n node, prefix []byte) (node, error) {
// node's original value. The rlp-encoded blob is preferred to be loaded from
// database because it's easy to decode node while complex to encode node to blob.
func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
- blob, err := t.reader.node(prefix, common.BytesToHash(n))
+ blob, err := t.reader.Node(prefix, common.BytesToHash(n))
if err != nil {
return nil, err
}
- t.tracer.onRead(prefix, blob)
+ t.prevalueTracer.Put(prefix, blob)
// The returned node blob won't be changed afterward. No need to
// deep-copy the slice.
return decodeNodeUnsafe(n, blob)
}
+// deletedNodes returns a list of node paths, referring the nodes being deleted
+// from the trie. It's possible a few deleted nodes were embedded in their parent
+// before, the deletions can be no effect by deleting nothing, filter them out.
+func (t *Trie) deletedNodes() [][]byte {
+ var (
+ pos int
+ list = t.opTracer.deletedList()
+ flags = t.prevalueTracer.HasList(list)
+ )
+ for i := 0; i < len(list); i++ {
+ if flags[i] {
+ list[pos] = list[i]
+ pos++
+ }
+ }
+ return list[:pos] // trim to the new length
+}
+
// Hash returns the root hash of the trie. It does not write to the
// database and can be used even if the trie doesn't have one.
func (t *Trie) Hash() common.Hash {
@@ -644,13 +705,13 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
// (b) The trie was non-empty and all nodes are dropped => return
// the node set includes all deleted nodes
if t.root == nil {
- paths := t.tracer.deletedNodes()
+ paths := t.deletedNodes()
if len(paths) == 0 {
return types.EmptyRootHash, nil // case (a)
}
nodes := trienode.NewNodeSet(t.owner)
for _, path := range paths {
- nodes.AddNode([]byte(path), trienode.NewDeleted())
+ nodes.AddNode(path, trienode.NewDeletedWithPrev(t.prevalueTracer.Get(path)))
}
return types.EmptyRootHash, nodes // case (b)
}
@@ -667,11 +728,11 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
return rootHash, nil
}
nodes := trienode.NewNodeSet(t.owner)
- for _, path := range t.tracer.deletedNodes() {
- nodes.AddNode([]byte(path), trienode.NewDeleted())
+ for _, path := range t.deletedNodes() {
+ nodes.AddNode(path, trienode.NewDeletedWithPrev(t.prevalueTracer.Get(path)))
}
// If the number of changes is below 100, we let one thread handle it
- t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root, t.uncommitted > 100)
+ t.root = newCommitter(nodes, t.prevalueTracer, collectLeaf).Commit(t.root, t.uncommitted > 100)
t.uncommitted = 0
return rootHash, nodes
}
@@ -691,15 +752,8 @@ func (t *Trie) hashRoot() []byte {
}
// Witness returns a set containing all trie nodes that have been accessed.
-func (t *Trie) Witness() map[string]struct{} {
- if len(t.tracer.accessList) == 0 {
- return nil
- }
- witness := make(map[string]struct{}, len(t.tracer.accessList))
- for _, node := range t.tracer.accessList {
- witness[string(node)] = struct{}{}
- }
- return witness
+func (t *Trie) Witness() map[string][]byte {
+ return t.prevalueTracer.Values()
}
// Reset drops the referenced root node and cleans all internal state.
@@ -708,6 +762,7 @@ func (t *Trie) Reset() {
t.owner = common.Hash{}
t.unhashed = 0
t.uncommitted = 0
- t.tracer.reset()
+ t.opTracer.reset()
+ t.prevalueTracer.Reset()
t.committed = false
}
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
index a42cdb0cf9..42fe4d72c7 100644
--- a/trie/trie_reader.go
+++ b/trie/trie_reader.go
@@ -22,39 +22,39 @@ import (
"github.com/ethereum/go-ethereum/triedb/database"
)
-// trieReader is a wrapper of the underlying node reader. It's not safe
+// Reader is a wrapper of the underlying database reader. It's not safe
// for concurrent usage.
-type trieReader struct {
+type Reader struct {
owner common.Hash
reader database.NodeReader
banned map[string]struct{} // Marker to prevent node from being accessed, for tests
}
-// newTrieReader initializes the trie reader with the given node reader.
-func newTrieReader(stateRoot, owner common.Hash, db database.NodeDatabase) (*trieReader, error) {
+// NewReader initializes the trie reader with the given database reader.
+func NewReader(stateRoot, owner common.Hash, db database.NodeDatabase) (*Reader, error) {
if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash {
- return &trieReader{owner: owner}, nil
+ return &Reader{owner: owner}, nil
}
reader, err := db.NodeReader(stateRoot)
if err != nil {
return nil, &MissingNodeError{Owner: owner, NodeHash: stateRoot, err: err}
}
- return &trieReader{owner: owner, reader: reader}, nil
+ return &Reader{owner: owner, reader: reader}, nil
}
// newEmptyReader initializes the pure in-memory reader. All read operations
// should be forbidden and returns the MissingNodeError.
-func newEmptyReader() *trieReader {
- return &trieReader{}
+func newEmptyReader() *Reader {
+ return &Reader{}
}
-// node retrieves the rlp-encoded trie node with the provided trie node
+// Node retrieves the rlp-encoded trie node with the provided trie node
// information. An MissingNodeError will be returned in case the node is
// not found or any error is encountered.
//
// Don't modify the returned byte slice since it's not deep-copied and
// still be referenced by database.
-func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) {
+func (r *Reader) Node(path []byte, hash common.Hash) ([]byte, error) {
// Perform the logics in tests for preventing trie node access.
if r.banned != nil {
if _, ok := r.banned[string(path)]; ok {
diff --git a/trie/trie_test.go b/trie/trie_test.go
index edd85677fe..22c3494f47 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -449,35 +449,35 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
if !ok || n.IsDeleted() {
return errors.New("expect new node")
}
- //if len(n.Prev) > 0 {
- // return errors.New("unexpected origin value")
- //}
+ if len(set.Origins[path]) > 0 {
+ return errors.New("unexpected origin value")
+ }
}
// Check deletion set
- for path := range deletes {
+ for path, blob := range deletes {
n, ok := set.Nodes[path]
if !ok || !n.IsDeleted() {
return errors.New("expect deleted node")
}
- //if len(n.Prev) == 0 {
- // return errors.New("expect origin value")
- //}
- //if !bytes.Equal(n.Prev, blob) {
- // return errors.New("invalid origin value")
- //}
+ if len(set.Origins[path]) == 0 {
+ return errors.New("expect origin value")
+ }
+ if !bytes.Equal(set.Origins[path], blob) {
+ return errors.New("invalid origin value")
+ }
}
// Check update set
- for path := range updates {
+ for path, blob := range updates {
n, ok := set.Nodes[path]
if !ok || n.IsDeleted() {
return errors.New("expect updated node")
}
- //if len(n.Prev) == 0 {
- // return errors.New("expect origin value")
- //}
- //if !bytes.Equal(n.Prev, blob) {
- // return errors.New("invalid origin value")
- //}
+ if len(set.Origins[path]) == 0 {
+ return errors.New("expect origin value")
+ }
+ if !bytes.Equal(set.Origins[path], blob) {
+ return errors.New("invalid origin value")
+ }
}
return nil
}
@@ -595,18 +595,18 @@ func runRandTest(rt randTest) error {
deleteExp[path] = struct{}{}
}
}
- if len(insertExp) != len(tr.tracer.inserts) {
+ if len(insertExp) != len(tr.opTracer.inserts) {
rt[i].err = errors.New("insert set mismatch")
}
- if len(deleteExp) != len(tr.tracer.deletes) {
+ if len(deleteExp) != len(tr.opTracer.deletes) {
rt[i].err = errors.New("delete set mismatch")
}
- for insert := range tr.tracer.inserts {
+ for insert := range tr.opTracer.inserts {
if _, present := insertExp[insert]; !present {
rt[i].err = errors.New("missing inserted node")
}
}
- for del := range tr.tracer.deletes {
+ for del := range tr.opTracer.deletes {
if _, present := deleteExp[del]; !present {
rt[i].err = errors.New("missing deleted node")
}
@@ -1499,3 +1499,83 @@ func testTrieCopyNewTrie(t *testing.T, entries []kv) {
t.Errorf("Hash mismatch: old %v, new %v", hash, tr.Hash())
}
}
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ethereum/go-ethereum/trie
+// cpu: Apple M1 Pro
+// BenchmarkTriePrefetch
+// BenchmarkTriePrefetch-8 9961 100706 ns/op
+func BenchmarkTriePrefetch(b *testing.B) {
+ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
+ tr := NewEmpty(db)
+ vals := make(map[string]*kv)
+ for i := 0; i < 3000; i++ {
+ value := &kv{
+ k: randBytes(32),
+ v: randBytes(20),
+ t: false,
+ }
+ tr.MustUpdate(value.k, value.v)
+ vals[string(value.k)] = value
+ }
+ root, nodes := tr.Commit(false)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ tr, err := New(TrieID(root), db)
+ if err != nil {
+ b.Fatalf("Failed to open the trie")
+ }
+ var keys [][]byte
+ for k := range vals {
+ keys = append(keys, []byte(k))
+ if len(keys) > 64 {
+ break
+ }
+ }
+ tr.Prefetch(keys)
+ }
+}
+
+// goos: darwin
+// goarch: arm64
+// pkg: github.com/ethereum/go-ethereum/trie
+// cpu: Apple M1 Pro
+// BenchmarkTrieSeqPrefetch
+// BenchmarkTrieSeqPrefetch-8 12879 96710 ns/op
+func BenchmarkTrieSeqPrefetch(b *testing.B) {
+ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
+ tr := NewEmpty(db)
+ vals := make(map[string]*kv)
+ for i := 0; i < 3000; i++ {
+ value := &kv{
+ k: randBytes(32),
+ v: randBytes(20),
+ t: false,
+ }
+ tr.MustUpdate(value.k, value.v)
+ vals[string(value.k)] = value
+ }
+ root, nodes := tr.Commit(false)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ tr, err := New(TrieID(root), db)
+ if err != nil {
+ b.Fatalf("Failed to open the trie")
+ }
+ var keys [][]byte
+ for k := range vals {
+ keys = append(keys, []byte(k))
+ if len(keys) > 64 {
+ break
+ }
+ }
+ for _, k := range keys {
+ tr.Get(k)
+ }
+ }
+}
diff --git a/trie/trienode/node.go b/trie/trienode/node.go
index b09ec66374..c83dc27cef 100644
--- a/trie/trienode/node.go
+++ b/trie/trienode/node.go
@@ -51,6 +51,35 @@ func New(hash common.Hash, blob []byte) *Node {
// NewDeleted constructs a node which is deleted.
func NewDeleted() *Node { return New(common.Hash{}, nil) }
+// NodeWithPrev is a wrapper over Node by tracking the original value of node.
+type NodeWithPrev struct {
+ *Node
+ Prev []byte // Nil means the node was not existent
+}
+
+// NewNodeWithPrev constructs a node with the additional original value.
+func NewNodeWithPrev(hash common.Hash, blob []byte, prev []byte) *NodeWithPrev {
+ return &NodeWithPrev{
+ Node: &Node{
+ Hash: hash,
+ Blob: blob,
+ },
+ Prev: prev,
+ }
+}
+
+// NewDeletedWithPrev constructs a node which is deleted with the additional
+// original value.
+func NewDeletedWithPrev(prev []byte) *NodeWithPrev {
+ return &NodeWithPrev{
+ Node: &Node{
+ Hash: common.Hash{},
+ Blob: nil,
+ },
+ Prev: prev,
+ }
+}
+
// leaf represents a trie leaf node
type leaf struct {
Blob []byte // raw blob of leaf
@@ -63,6 +92,8 @@ type NodeSet struct {
Owner common.Hash
Leaves []*leaf
Nodes map[string]*Node
+ Origins map[string][]byte
+
updates int // the count of updated and inserted nodes
deletes int // the count of deleted nodes
}
@@ -71,8 +102,9 @@ type NodeSet struct {
// the owning account address hash for storage tries.
func NewNodeSet(owner common.Hash) *NodeSet {
return &NodeSet{
- Owner: owner,
- Nodes: make(map[string]*Node),
+ Owner: owner,
+ Nodes: make(map[string]*Node),
+ Origins: make(map[string][]byte),
}
}
@@ -91,22 +123,25 @@ func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) {
}
// AddNode adds the provided node into set.
-func (set *NodeSet) AddNode(path []byte, n *Node) {
+func (set *NodeSet) AddNode(path []byte, n *NodeWithPrev) {
if n.IsDeleted() {
set.deletes += 1
} else {
set.updates += 1
}
- set.Nodes[string(path)] = n
+ key := string(path)
+ set.Nodes[key] = n.Node
+ set.Origins[key] = n.Prev
}
-// MergeSet merges this 'set' with 'other'. It assumes that the sets are disjoint,
+// MergeDisjoint merges this 'set' with 'other'. It assumes that the sets are disjoint,
// and thus does not deduplicate data (count deletes, dedup leaves etc).
-func (set *NodeSet) MergeSet(other *NodeSet) error {
+func (set *NodeSet) MergeDisjoint(other *NodeSet) error {
if set.Owner != other.Owner {
return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, other.Owner)
}
maps.Copy(set.Nodes, other.Nodes)
+ maps.Copy(set.Origins, other.Origins)
set.deletes += other.deletes
set.updates += other.updates
@@ -117,12 +152,13 @@ func (set *NodeSet) MergeSet(other *NodeSet) error {
return nil
}
-// Merge adds a set of nodes into the set.
-func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
- if set.Owner != owner {
- return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, owner)
+// Merge adds a set of nodes to the current set. It assumes the sets may overlap,
+// so deduplication is performed.
+func (set *NodeSet) Merge(other *NodeSet) error {
+ if set.Owner != other.Owner {
+ return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, other.Owner)
}
- for path, node := range nodes {
+ for path, node := range other.Nodes {
prev, ok := set.Nodes[path]
if ok {
// overwrite happens, revoke the counter
@@ -137,8 +173,17 @@ func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error {
} else {
set.updates += 1
}
- set.Nodes[path] = node
+ set.Nodes[path] = node // overwrite the node with new value
+
+ // Add the original value only if it was previously non-existent.
+ // If multiple mutations are made to the same node, the first one
+ // is considered the true original value.
+ if _, exist := set.Origins[path]; !exist {
+ set.Origins[path] = other.Origins[path]
+ }
}
+ // TODO leaves are not aggregated, as they are not used in storage tries.
+ // TODO(rjl493456442) deprecate the leaves along with the legacy hash mode.
return nil
}
@@ -169,11 +214,16 @@ func (set *NodeSet) Summary() string {
for path, n := range set.Nodes {
// Deletion
if n.IsDeleted() {
- fmt.Fprintf(out, " [-]: %x\n", path)
+ fmt.Fprintf(out, " [-]: %x prev: %x\n", path, set.Origins[path])
+ continue
+ }
+ // Insertion
+ if len(set.Origins[path]) == 0 {
+ fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.Hash)
continue
}
- // Insertion or update
- fmt.Fprintf(out, " [+/*]: %x -> %v \n", path, n.Hash)
+ // Update
+ fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.Hash, set.Origins[path])
}
for _, n := range set.Leaves {
fmt.Fprintf(out, "[leaf]: %v\n", n)
@@ -203,7 +253,7 @@ func NewWithNodeSet(set *NodeSet) *MergedNodeSet {
func (set *MergedNodeSet) Merge(other *NodeSet) error {
subset, present := set.Sets[other.Owner]
if present {
- return subset.Merge(other.Owner, other.Nodes)
+ return subset.Merge(other)
}
set.Sets[other.Owner] = other
return nil
diff --git a/trie/trienode/node_test.go b/trie/trienode/node_test.go
index bcb3a2202b..332b6f1776 100644
--- a/trie/trienode/node_test.go
+++ b/trie/trienode/node_test.go
@@ -17,13 +17,100 @@
package trienode
import (
+ "bytes"
"crypto/rand"
+ "maps"
+ "reflect"
+ "slices"
"testing"
+ "github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/internal/testrand"
)
+func makeTestSet(owner common.Hash, n int, paths [][]byte) *NodeSet {
+ set := NewNodeSet(owner)
+ for i := 0; i < n*3/4; i++ {
+ path := testrand.Bytes(10)
+ blob := testrand.Bytes(100)
+ set.AddNode(path, NewNodeWithPrev(crypto.Keccak256Hash(blob), blob, testrand.Bytes(100)))
+ }
+ for i := 0; i < n/4; i++ {
+ path := testrand.Bytes(10)
+ set.AddNode(path, NewDeletedWithPrev(testrand.Bytes(100)))
+ }
+ for i := 0; i < len(paths); i++ {
+ if i%3 == 0 {
+ set.AddNode(paths[i], NewDeletedWithPrev(testrand.Bytes(100)))
+ } else {
+ blob := testrand.Bytes(100)
+ set.AddNode(paths[i], NewNodeWithPrev(crypto.Keccak256Hash(blob), blob, testrand.Bytes(100)))
+ }
+ }
+ return set
+}
+
+func copyNodeSet(set *NodeSet) *NodeSet {
+ cpy := &NodeSet{
+ Owner: set.Owner,
+ Leaves: slices.Clone(set.Leaves),
+ updates: set.updates,
+ deletes: set.deletes,
+ Nodes: maps.Clone(set.Nodes),
+ Origins: maps.Clone(set.Origins),
+ }
+ return cpy
+}
+
+func TestNodeSetMerge(t *testing.T) {
+ var shared [][]byte
+ for i := 0; i < 2; i++ {
+ shared = append(shared, testrand.Bytes(10))
+ }
+ owner := testrand.Hash()
+ setA := makeTestSet(owner, 20, shared)
+ cpyA := copyNodeSet(setA)
+
+ setB := makeTestSet(owner, 20, shared)
+ setA.Merge(setB)
+
+ for path, node := range setA.Nodes {
+ nA, inA := cpyA.Nodes[path]
+ nB, inB := setB.Nodes[path]
+
+ switch {
+ case inA && inB:
+ origin := setA.Origins[path]
+ if !bytes.Equal(origin, cpyA.Origins[path]) {
+ t.Errorf("Unexpected origin, path %v: want: %v, got: %v", []byte(path), cpyA.Origins[path], origin)
+ }
+ if !reflect.DeepEqual(node, nB) {
+ t.Errorf("Unexpected node, path %v: want: %v, got: %v", []byte(path), spew.Sdump(nB), spew.Sdump(node))
+ }
+ case !inA && inB:
+ origin := setA.Origins[path]
+ if !bytes.Equal(origin, setB.Origins[path]) {
+ t.Errorf("Unexpected origin, path %v: want: %v, got: %v", []byte(path), setB.Origins[path], origin)
+ }
+ if !reflect.DeepEqual(node, nB) {
+ t.Errorf("Unexpected node, path %v: want: %v, got: %v", []byte(path), spew.Sdump(nB), spew.Sdump(node))
+ }
+ case inA && !inB:
+ origin := setA.Origins[path]
+ if !bytes.Equal(origin, cpyA.Origins[path]) {
+ t.Errorf("Unexpected origin, path %v: want: %v, got: %v", []byte(path), cpyA.Origins[path], origin)
+ }
+ if !reflect.DeepEqual(node, nA) {
+ t.Errorf("Unexpected node, path %v: want: %v, got: %v", []byte(path), spew.Sdump(nA), spew.Sdump(node))
+ }
+ default:
+ t.Errorf("Unexpected node, %v", []byte(path))
+ }
+ }
+}
+
func BenchmarkMerge(b *testing.B) {
b.Run("1K", func(b *testing.B) {
benchmarkMerge(b, 1000)
@@ -42,7 +129,7 @@ func benchmarkMerge(b *testing.B, count int) {
blob := make([]byte, 32)
rand.Read(blob)
hash := crypto.Keccak256Hash(blob)
- s.AddNode(path, New(hash, blob))
+ s.AddNode(path, NewNodeWithPrev(hash, blob, nil))
}
for i := 0; i < count; i++ {
// Random path of 4 nibbles
@@ -53,9 +140,9 @@ func benchmarkMerge(b *testing.B, count int) {
for i := 0; i < b.N; i++ {
// Store set x into a backup
z := NewNodeSet(common.Hash{})
- z.Merge(common.Hash{}, x.Nodes)
+ z.Merge(x)
// Merge y into x
- x.Merge(common.Hash{}, y.Nodes)
+ x.Merge(y)
x = z
}
}
diff --git a/trie/verkle.go b/trie/verkle.go
index 015b8f6590..186ac1f642 100644
--- a/trie/verkle.go
+++ b/trie/verkle.go
@@ -41,36 +41,35 @@ var (
type VerkleTrie struct {
root verkle.VerkleNode
cache *utils.PointCache
- reader *trieReader
+ reader *Reader
+ tracer *PrevalueTracer
}
// NewVerkleTrie constructs a verkle tree based on the specified root hash.
func NewVerkleTrie(root common.Hash, db database.NodeDatabase, cache *utils.PointCache) (*VerkleTrie, error) {
- reader, err := newTrieReader(root, common.Hash{}, db)
+ reader, err := NewReader(root, common.Hash{}, db)
if err != nil {
return nil, err
}
+ t := &VerkleTrie{
+ root: verkle.New(),
+ cache: cache,
+ reader: reader,
+ tracer: NewPrevalueTracer(),
+ }
// Parse the root verkle node if it's not empty.
- node := verkle.New()
if root != types.EmptyVerkleHash && root != types.EmptyRootHash {
- blob, err := reader.node(nil, common.Hash{})
+ blob, err := t.nodeResolver(nil)
if err != nil {
return nil, err
}
- node, err = verkle.ParseNode(blob, 0)
+ node, err := verkle.ParseNode(blob, 0)
if err != nil {
return nil, err
}
+ t.root = node
}
- return &VerkleTrie{
- root: node,
- cache: cache,
- reader: reader,
- }, nil
-}
-
-func (t *VerkleTrie) FlatdbNodeResolver(path []byte) ([]byte, error) {
- return t.reader.node(path, common.Hash{})
+ return t, nil
}
// GetKey returns the sha3 preimage of a hashed key that was previously used
@@ -109,6 +108,17 @@ func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error
return acc, nil
}
+// PrefetchAccount attempts to resolve specific accounts from the database
+// to accelerate subsequent trie operations.
+func (t *VerkleTrie) PrefetchAccount(addresses []common.Address) error {
+ for _, addr := range addresses {
+ if _, err := t.GetAccount(addr); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// GetStorage implements state.Trie, retrieving the storage slot with the specified
// account address and storage key. If the specified slot is not in the verkle tree,
// nil will be returned. If the tree is corrupted, an error will be returned.
@@ -121,6 +131,17 @@ func (t *VerkleTrie) GetStorage(addr common.Address, key []byte) ([]byte, error)
return common.TrimLeftZeroes(val), nil
}
+// PrefetchStorage attempts to resolve specific storage slots from the database
+// to accelerate subsequent trie operations.
+func (t *VerkleTrie) PrefetchStorage(addr common.Address, keys [][]byte) error {
+ for _, key := range keys {
+ if _, err := t.GetStorage(addr, key); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// UpdateAccount implements state.Trie, writing the provided account into the tree.
// If the tree is corrupted, an error will be returned.
func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount, codeLen int) error {
@@ -268,7 +289,7 @@ func (t *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet) {
nodeset := trienode.NewNodeSet(common.Hash{})
for _, node := range nodes {
// Hash parameter is not used in pathdb
- nodeset.AddNode(node.Path, trienode.New(common.Hash{}, node.SerializedBytes))
+ nodeset.AddNode(node.Path, trienode.NewNodeWithPrev(common.Hash{}, node.SerializedBytes, t.tracer.Get(node.Path)))
}
// Serialize root commitment form
return t.Hash(), nodeset
@@ -301,6 +322,7 @@ func (t *VerkleTrie) Copy() *VerkleTrie {
root: t.root.Copy(),
cache: t.cache,
reader: t.reader,
+ tracer: t.tracer.Copy(),
}
}
@@ -317,7 +339,7 @@ func (t *VerkleTrie) Proof(posttrie *VerkleTrie, keys [][]byte) (*verkle.VerkleP
if posttrie != nil {
postroot = posttrie.root
}
- proof, _, _, _, err := verkle.MakeVerkleMultiProof(t.root, postroot, keys, t.FlatdbNodeResolver)
+ proof, _, _, _, err := verkle.MakeVerkleMultiProof(t.root, postroot, keys, t.nodeResolver)
if err != nil {
return nil, nil, err
}
@@ -421,10 +443,15 @@ func (t *VerkleTrie) ToDot() string {
}
func (t *VerkleTrie) nodeResolver(path []byte) ([]byte, error) {
- return t.reader.node(path, common.Hash{})
+ blob, err := t.reader.Node(path, common.Hash{})
+ if err != nil {
+ return nil, err
+ }
+ t.tracer.Put(path, blob)
+ return blob, nil
}
// Witness returns a set containing all trie nodes that have been accessed.
-func (t *VerkleTrie) Witness() map[string]struct{} {
+func (t *VerkleTrie) Witness() map[string][]byte {
panic("not implemented")
}
diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go
index 7cae874064..6438176040 100644
--- a/triedb/pathdb/database.go
+++ b/triedb/pathdb/database.go
@@ -219,12 +219,14 @@ type Database struct {
isVerkle bool // Flag if database is used for verkle tree
hasher nodeHasher // Trie node hasher
- config *Config // Configuration for database
- diskdb ethdb.Database // Persistent storage for matured trie nodes
- tree *layerTree // The group for all known layers
- freezer ethdb.ResettableAncientStore // Freezer for storing trie histories, nil possible in tests
- lock sync.RWMutex // Lock to prevent mutations from happening at the same time
- indexer *historyIndexer // History indexer
+ config *Config // Configuration for database
+ diskdb ethdb.Database // Persistent storage for matured trie nodes
+ tree *layerTree // The group for all known layers
+
+ stateFreezer ethdb.ResettableAncientStore // Freezer for storing state histories, nil possible in tests
+ stateIndexer *historyIndexer // History indexer for historical state data, nil possible
+
+ lock sync.RWMutex // Lock to prevent mutations from happening at the same time
}
// New attempts to load an already existing layer from a persistent key-value
@@ -275,8 +277,8 @@ func New(diskdb ethdb.Database, config *Config, isVerkle bool) *Database {
log.Crit("Failed to setup the generator", "err", err)
}
// TODO (rjl493456442) disable the background indexing in read-only mode
- if db.freezer != nil && db.config.EnableStateIndexing {
- db.indexer = newHistoryIndexer(db.diskdb, db.freezer, db.tree.bottom().stateID())
+ if db.stateFreezer != nil && db.config.EnableStateIndexing {
+ db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID())
log.Info("Enabled state history indexing")
}
fields := config.fields()
@@ -304,25 +306,26 @@ func (db *Database) repairHistory() error {
if err != nil {
log.Crit("Failed to open state history freezer", "err", err)
}
- db.freezer = freezer
+ db.stateFreezer = freezer
// Reset the entire state histories if the trie database is not initialized
// yet. This action is necessary because these state histories are not
// expected to exist without an initialized trie database.
id := db.tree.bottom().stateID()
if id == 0 {
- frozen, err := db.freezer.Ancients()
+ frozen, err := db.stateFreezer.Ancients()
if err != nil {
log.Crit("Failed to retrieve head of state history", "err", err)
}
if frozen != 0 {
- // TODO(rjl493456442) would be better to group them into a batch.
- //
// Purge all state history indexing data first
- rawdb.DeleteStateHistoryIndexMetadata(db.diskdb)
- rawdb.DeleteStateHistoryIndex(db.diskdb)
- err := db.freezer.Reset()
- if err != nil {
+ batch := db.diskdb.NewBatch()
+ rawdb.DeleteStateHistoryIndexMetadata(batch)
+ rawdb.DeleteStateHistoryIndex(batch)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to purge state history index", "err", err)
+ }
+ if err := db.stateFreezer.Reset(); err != nil {
log.Crit("Failed to reset state histories", "err", err)
}
log.Info("Truncated extraneous state history")
@@ -331,7 +334,7 @@ func (db *Database) repairHistory() error {
}
// Truncate the extra state histories above in freezer in case it's not
// aligned with the disk layer. It might happen after a unclean shutdown.
- pruned, err := truncateFromHead(db.diskdb, db.freezer, id)
+ pruned, err := truncateFromHead(db.stateFreezer, id)
if err != nil {
log.Crit("Failed to truncate extra state histories", "err", err)
}
@@ -507,13 +510,15 @@ func (db *Database) Enable(root common.Hash) error {
// all root->id mappings should be removed as well. Since
// mappings can be huge and might take a while to clear
// them, just leave them in disk and wait for overwriting.
- if db.freezer != nil {
- // TODO(rjl493456442) would be better to group them into a batch.
- //
+ if db.stateFreezer != nil {
// Purge all state history indexing data first
- rawdb.DeleteStateHistoryIndexMetadata(db.diskdb)
- rawdb.DeleteStateHistoryIndex(db.diskdb)
- if err := db.freezer.Reset(); err != nil {
+ batch.Reset()
+ rawdb.DeleteStateHistoryIndexMetadata(batch)
+ rawdb.DeleteStateHistoryIndex(batch)
+ if err := batch.Write(); err != nil {
+ return err
+ }
+ if err := db.stateFreezer.Reset(); err != nil {
return err
}
}
@@ -529,9 +534,9 @@ func (db *Database) Enable(root common.Hash) error {
// To ensure the history indexer always matches the current state, we must:
// 1. Close any existing indexer
// 2. Re-initialize the indexer so it starts indexing from the new state root.
- if db.indexer != nil && db.freezer != nil && db.config.EnableStateIndexing {
- db.indexer.close()
- db.indexer = newHistoryIndexer(db.diskdb, db.freezer, db.tree.bottom().stateID())
+ if db.stateIndexer != nil && db.stateFreezer != nil && db.config.EnableStateIndexing {
+ db.stateIndexer.close()
+ db.stateIndexer = newHistoryIndexer(db.diskdb, db.stateFreezer, db.tree.bottom().stateID())
log.Info("Re-enabled state history indexing")
}
log.Info("Rebuilt trie database", "root", root)
@@ -551,7 +556,7 @@ func (db *Database) Recover(root common.Hash) error {
if err := db.modifyAllowed(); err != nil {
return err
}
- if db.freezer == nil {
+ if db.stateFreezer == nil {
return errors.New("state rollback is non-supported")
}
// Short circuit if the target state is not recoverable
@@ -564,7 +569,7 @@ func (db *Database) Recover(root common.Hash) error {
dl = db.tree.bottom()
)
for dl.rootHash() != root {
- h, err := readHistory(db.freezer, dl.stateID())
+ h, err := readStateHistory(db.stateFreezer, dl.stateID())
if err != nil {
return err
}
@@ -585,7 +590,7 @@ func (db *Database) Recover(root common.Hash) error {
if err := db.diskdb.SyncKeyValue(); err != nil {
return err
}
- _, err := truncateFromHead(db.diskdb, db.freezer, dl.stateID())
+ _, err := truncateFromHead(db.stateFreezer, dl.stateID())
if err != nil {
return err
}
@@ -610,15 +615,15 @@ func (db *Database) Recoverable(root common.Hash) bool {
return false
}
// This is a temporary workaround for the unavailability of the freezer in
- // dev mode. As a consequence, the Pathdb loses the ability for deep reorg
+ // dev mode. As a consequence, the database loses the ability for deep reorg
// in certain cases.
// TODO(rjl493456442): Implement the in-memory ancient store.
- if db.freezer == nil {
+ if db.stateFreezer == nil {
return false
}
// Ensure the requested state is a canonical state and all state
- // histories in range [id+1, disklayer.ID] are present and complete.
- return checkHistories(db.freezer, *id+1, dl.stateID()-*id, func(m *meta) error {
+ // histories in range [id+1, dl.ID] are present and complete.
+ return checkStateHistories(db.stateFreezer, *id+1, dl.stateID()-*id, func(m *meta) error {
if m.parent != root {
return errors.New("unexpected state history")
}
@@ -646,14 +651,14 @@ func (db *Database) Close() error {
dl.resetCache() // release the memory held by clean cache
// Terminate the background state history indexer
- if db.indexer != nil {
- db.indexer.close()
+ if db.stateIndexer != nil {
+ db.stateIndexer.close()
}
// Close the attached state history freezer.
- if db.freezer == nil {
+ if db.stateFreezer == nil {
return nil
}
- return db.freezer.Close()
+ return db.stateFreezer.Close()
}
// Size returns the current storage size of the memory cache in front of the
@@ -704,7 +709,7 @@ func (db *Database) journalPath() string {
// End: State ID of the last history for the query. 0 implies the last available
// object is selected as the ending point. Note end is included in the query.
func (db *Database) AccountHistory(address common.Address, start, end uint64) (*HistoryStats, error) {
- return accountHistory(db.freezer, address, start, end)
+ return accountHistory(db.stateFreezer, address, start, end)
}
// StorageHistory inspects the storage history within the specified range.
@@ -717,22 +722,22 @@ func (db *Database) AccountHistory(address common.Address, start, end uint64) (*
//
// Note, slot refers to the hash of the raw slot key.
func (db *Database) StorageHistory(address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) {
- return storageHistory(db.freezer, address, slot, start, end)
+ return storageHistory(db.stateFreezer, address, slot, start, end)
}
// HistoryRange returns the block numbers associated with earliest and latest
// state history in the local store.
func (db *Database) HistoryRange() (uint64, uint64, error) {
- return historyRange(db.freezer)
+ return historyRange(db.stateFreezer)
}
// IndexProgress returns the indexing progress made so far. It provides the
// number of states that remain unindexed.
func (db *Database) IndexProgress() (uint64, error) {
- if db.indexer == nil {
+ if db.stateIndexer == nil {
return 0, nil
}
- return db.indexer.progress()
+ return db.stateIndexer.progress()
}
// AccountIterator creates a new account iterator for the specified root hash and
diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go
index e9a1850ee0..47d13e54a4 100644
--- a/triedb/pathdb/database_test.go
+++ b/triedb/pathdb/database_test.go
@@ -426,7 +426,7 @@ func (t *tester) verifyHistory() error {
for i, root := range t.roots {
// The state history related to the state above disk layer should not exist.
if i > bottom {
- _, err := readHistory(t.db.freezer, uint64(i+1))
+ _, err := readStateHistory(t.db.stateFreezer, uint64(i+1))
if err == nil {
return errors.New("unexpected state history")
}
@@ -434,7 +434,7 @@ func (t *tester) verifyHistory() error {
}
// The state history related to the state below or equal to the disk layer
// should exist.
- obj, err := readHistory(t.db.freezer, uint64(i+1))
+ obj, err := readStateHistory(t.db.stateFreezer, uint64(i+1))
if err != nil {
return err
}
@@ -568,7 +568,7 @@ func TestDisable(t *testing.T) {
t.Fatal("Failed to clean journal")
}
// Ensure all trie histories are removed
- n, err := tester.db.freezer.Ancients()
+ n, err := tester.db.stateFreezer.Ancients()
if err != nil {
t.Fatal("Failed to clean state history")
}
@@ -724,7 +724,7 @@ func TestTailTruncateHistory(t *testing.T) {
tester.db.Close()
tester.db = New(tester.db.diskdb, &Config{StateHistory: 10}, false)
- head, err := tester.db.freezer.Ancients()
+ head, err := tester.db.stateFreezer.Ancients()
if err != nil {
t.Fatalf("Failed to obtain freezer head")
}
diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go
index 06f0a7285f..13df6251e8 100644
--- a/triedb/pathdb/disklayer.go
+++ b/triedb/pathdb/disklayer.go
@@ -323,6 +323,69 @@ func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *no
return newDiffLayer(dl, root, id, block, nodes, states)
}
+// writeStateHistory stores the state history and indexes if indexing is
+// permitted.
+//
+// What's more, this function also returns a flag indicating whether the
+// buffer flushing is required, ensuring the persistent state ID is always
+// greater than or equal to the first history ID.
+func (dl *diskLayer) writeStateHistory(diff *diffLayer) (bool, error) {
+ // Short circuit if state history is not permitted
+ if dl.db.stateFreezer == nil {
+ return false, nil
+ }
+ // Bail out with an error if writing the state history fails.
+ // This can happen, for example, if the device is full.
+ err := writeStateHistory(dl.db.stateFreezer, diff)
+ if err != nil {
+ return false, err
+ }
+ // Notify the state history indexer for newly created history
+ if dl.db.stateIndexer != nil {
+ if err := dl.db.stateIndexer.extend(diff.stateID()); err != nil {
+ return false, err
+ }
+ }
+ // Determine if the persisted history object has exceeded the
+ // configured limitation.
+ limit := dl.db.config.StateHistory
+ if limit == 0 {
+ return false, nil
+ }
+ tail, err := dl.db.stateFreezer.Tail()
+ if err != nil {
+ return false, err
+ } // firstID = tail+1
+
+ // length = diff.stateID()-firstID+1 = diff.stateID()-tail
+ if diff.stateID()-tail <= limit {
+ return false, nil
+ }
+ newFirst := diff.stateID() - limit + 1 // the id of first history **after truncation**
+
+ // In a rare case where the ID of the first history object (after tail
+ // truncation) exceeds the persisted state ID, we must take corrective
+ // steps:
+ //
+ // - Skip tail truncation temporarily, avoid the scenario that associated
+ // history of persistent state is removed
+ //
+ // - Force a commit of the cached dirty states into persistent state
+ //
+ // These measures ensure the persisted state ID always remains greater
+ // than or equal to the first history ID.
+ if persistentID := rawdb.ReadPersistentStateID(dl.db.diskdb); persistentID < newFirst {
+ log.Debug("Skip tail truncation", "persistentID", persistentID, "tailID", tail+1, "headID", diff.stateID(), "limit", limit)
+ return true, nil
+ }
+ pruned, err := truncateFromTail(dl.db.stateFreezer, newFirst-1)
+ if err != nil {
+ return false, err
+ }
+ log.Debug("Pruned state history", "items", pruned, "tailid", newFirst)
+ return false, nil
+}
+
// commit merges the given bottom-most diff layer into the node buffer
// and returns a newly constructed disk layer. Note the current disk
// layer must be tagged as stale first to prevent re-access.
@@ -333,34 +396,9 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
// Construct and store the state history first. If crash happens after storing
// the state history but without flushing the corresponding states(journal),
// the stored state history will be truncated from head in the next restart.
- var (
- overflow bool
- oldest uint64
- )
- if dl.db.freezer != nil {
- // Bail out with an error if writing the state history fails.
- // This can happen, for example, if the device is full.
- err := writeHistory(dl.db.freezer, bottom)
- if err != nil {
- return nil, err
- }
- // Determine if the persisted history object has exceeded the configured
- // limitation, set the overflow as true if so.
- tail, err := dl.db.freezer.Tail()
- if err != nil {
- return nil, err
- }
- limit := dl.db.config.StateHistory
- if limit != 0 && bottom.stateID()-tail > limit {
- overflow = true
- oldest = bottom.stateID() - limit + 1 // track the id of history **after truncation**
- }
- // Notify the state history indexer for newly created history
- if dl.db.indexer != nil {
- if err := dl.db.indexer.extend(bottom.stateID()); err != nil {
- return nil, err
- }
- }
+ flush, err := dl.writeStateHistory(bottom)
+ if err != nil {
+ return nil, err
}
// Mark the diskLayer as stale before applying any mutations on top.
dl.stale = true
@@ -373,21 +411,13 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
}
rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID())
- // In a unique scenario where the ID of the oldest history object (after tail
- // truncation) surpasses the persisted state ID, we take the necessary action
- // of forcibly committing the cached dirty states to ensure that the persisted
- // state ID remains higher.
- persistedID := rawdb.ReadPersistentStateID(dl.db.diskdb)
- if !force && persistedID < oldest {
- force = true
- }
// Merge the trie nodes and flat states of the bottom-most diff layer into the
// buffer as the combined layer.
combined := dl.buffer.commit(bottom.nodes, bottom.states.stateSet)
// Terminate the background state snapshot generation before mutating the
// persistent state.
- if combined.full() || force {
+ if combined.full() || force || flush {
// Wait until the previous frozen buffer is fully flushed
if dl.frozen != nil {
if err := dl.frozen.waitFlush(); err != nil {
@@ -418,7 +448,7 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
// Freeze the live buffer and schedule background flushing
dl.frozen = combined
- dl.frozen.flush(bottom.root, dl.db.diskdb, dl.db.freezer, progress, dl.nodes, dl.states, bottom.stateID(), func() {
+ dl.frozen.flush(bottom.root, dl.db.diskdb, dl.db.stateFreezer, progress, dl.nodes, dl.states, bottom.stateID(), func() {
// Resume the background generation if it's not completed yet.
// The generator is assumed to be available if the progress is
// not nil.
@@ -431,8 +461,8 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
}
})
// Block until the frozen buffer is fully flushed out if the async flushing
- // is not allowed, or if the oldest history surpasses the persisted state ID.
- if dl.db.config.NoAsyncFlush || persistedID < oldest {
+ // is not allowed.
+ if dl.db.config.NoAsyncFlush {
if err := dl.frozen.waitFlush(); err != nil {
return nil, err
}
@@ -445,20 +475,11 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
if dl.generator != nil {
ndl.setGenerator(dl.generator)
}
- // To remove outdated history objects from the end, we set the 'tail' parameter
- // to 'oldest-1' due to the offset between the freezer index and the history ID.
- if overflow {
- pruned, err := truncateFromTail(ndl.db.diskdb, ndl.db.freezer, oldest-1)
- if err != nil {
- return nil, err
- }
- log.Debug("Pruned state history", "items", pruned, "tailid", oldest)
- }
return ndl, nil
}
// revert applies the given state history and return a reverted disk layer.
-func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
+func (dl *diskLayer) revert(h *stateHistory) (*diskLayer, error) {
start := time.Now()
if h.meta.root != dl.rootHash() {
return nil, errUnexpectedHistory
@@ -484,8 +505,8 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
dl.stale = true
// Unindex the corresponding state history
- if dl.db.indexer != nil {
- if err := dl.db.indexer.shorten(dl.id); err != nil {
+ if dl.db.stateIndexer != nil {
+ if err := dl.db.stateIndexer.shorten(dl.id); err != nil {
return nil, err
}
}
diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go
index 47f224170d..bbedd52f34 100644
--- a/triedb/pathdb/history.go
+++ b/triedb/pathdb/history.go
@@ -1,4 +1,4 @@
-// Copyright 2023 The go-ethereum Authors
+// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -12,592 +12,26 @@
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-// | Init State |---->| State 1 |---->| ... |---->| State n |
-// +------------+ +---------+ +---------+ +---------+
-//
-// +-----------+ +------+ +-----------+
-// | History 1 |----> | ... |---->| History n |
-// +-----------+ +------+ +-----------+
-//
-// # Rollback
-//
-// If the system wants to roll back to a previous state n, it needs to ensure
-// all history objects from n+1 up to the current disk layer are existent. The
-// history objects are applied to the state in reverse order, starting from the
-// current disk layer.
-
-const (
- accountIndexSize = common.AddressLength + 13 // The length of encoded account index
- slotIndexSize = common.HashLength + 5 // The length of encoded slot index
- historyMetaSize = 9 + 2*common.HashLength // The length of encoded history meta
-
- stateHistoryV0 = uint8(0) // initial version of state history structure
- stateHistoryV1 = uint8(1) // use the storage slot raw key as the identifier instead of the key hash
- historyVersion = stateHistoryV1 // the default state history version
+var (
+ errHeadTruncationOutOfRange = errors.New("history head truncation out of range")
+ errTailTruncationOutOfRange = errors.New("history tail truncation out of range")
)
-// Each state history entry is consisted of five elements:
-//
-// # metadata
-// This object contains a few meta fields, such as the associated state root,
-// block number, version tag and so on. This object may contain an extra
-// accountHash list which means the storage changes belong to these accounts
-// are not complete due to large contract destruction. The incomplete history
-// can not be used for rollback and serving archive state request.
-//
-// # account index
-// This object contains some index information of account. For example, offset
-// and length indicate the location of the data belonging to the account. Besides,
-// storageOffset and storageSlots indicate the storage modification location
-// belonging to the account.
-//
-// The size of each account index is *fixed*, and all indexes are sorted
-// lexicographically. Thus binary search can be performed to quickly locate a
-// specific account.
-//
-// # account data
-// Account data is a concatenated byte stream composed of all account data.
-// The account data can be solved by the offset and length info indicated
-// by corresponding account index.
-//
-// fixed size
-// ^ ^
-// / \
-// +-----------------+-----------------+----------------+-----------------+
-// | Account index 1 | Account index 2 | ... | Account index N |
-// +-----------------+-----------------+----------------+-----------------+
-// |
-// | length
-// offset |----------------+
-// v v
-// +----------------+----------------+----------------+----------------+
-// | Account data 1 | Account data 2 | ... | Account data N |
-// +----------------+----------------+----------------+----------------+
-//
-// # storage index
-// This object is similar with account index. It's also fixed size and contains
-// the location info of storage slot data.
-//
-// # storage data
-// Storage data is a concatenated byte stream composed of all storage slot data.
-// The storage slot data can be solved by the location info indicated by
-// corresponding account index and storage slot index.
-//
-// fixed size
-// ^ ^
-// / \
-// +-----------------+-----------------+----------------+-----------------+
-// | Account index 1 | Account index 2 | ... | Account index N |
-// +-----------------+-----------------+----------------+-----------------+
-// |
-// | storage slots
-// storage offset |-----------------------------------------------------+
-// v v
-// +-----------------+-----------------+-----------------+
-// | storage index 1 | storage index 2 | storage index 3 |
-// +-----------------+-----------------+-----------------+
-// | length
-// offset |-------------+
-// v v
-// +-------------+
-// | slot data 1 |
-// +-------------+
-
-// accountIndex describes the metadata belonging to an account.
-type accountIndex struct {
- address common.Address // The address of account
- length uint8 // The length of account data, size limited by 255
- offset uint32 // The offset of item in account data table
- storageOffset uint32 // The offset of storage index in storage index table
- storageSlots uint32 // The number of mutated storage slots belonging to the account
-}
-
-// encode packs account index into byte stream.
-func (i *accountIndex) encode() []byte {
- var buf [accountIndexSize]byte
- copy(buf[:], i.address.Bytes())
- buf[common.AddressLength] = i.length
- binary.BigEndian.PutUint32(buf[common.AddressLength+1:], i.offset)
- binary.BigEndian.PutUint32(buf[common.AddressLength+5:], i.storageOffset)
- binary.BigEndian.PutUint32(buf[common.AddressLength+9:], i.storageSlots)
- return buf[:]
-}
-
-// decode unpacks account index from byte stream.
-func (i *accountIndex) decode(blob []byte) {
- i.address = common.BytesToAddress(blob[:common.AddressLength])
- i.length = blob[common.AddressLength]
- i.offset = binary.BigEndian.Uint32(blob[common.AddressLength+1:])
- i.storageOffset = binary.BigEndian.Uint32(blob[common.AddressLength+5:])
- i.storageSlots = binary.BigEndian.Uint32(blob[common.AddressLength+9:])
-}
-
-// slotIndex describes the metadata belonging to a storage slot.
-type slotIndex struct {
- // the identifier of the storage slot. Specifically
- // in v0, it's the hash of the raw storage slot key (32 bytes);
- // in v1, it's the raw storage slot key (32 bytes);
- id common.Hash
- length uint8 // The length of storage slot, up to 32 bytes defined in protocol
- offset uint32 // The offset of item in storage slot data table
-}
-
-// encode packs slot index into byte stream.
-func (i *slotIndex) encode() []byte {
- var buf [slotIndexSize]byte
- copy(buf[:common.HashLength], i.id.Bytes())
- buf[common.HashLength] = i.length
- binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
- return buf[:]
-}
-
-// decode unpack slot index from the byte stream.
-func (i *slotIndex) decode(blob []byte) {
- i.id = common.BytesToHash(blob[:common.HashLength])
- i.length = blob[common.HashLength]
- i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
-}
-
-// meta describes the meta data of state history object.
-type meta struct {
- version uint8 // version tag of history object
- parent common.Hash // prev-state root before the state transition
- root common.Hash // post-state root after the state transition
- block uint64 // associated block number
-}
-
-// encode packs the meta object into byte stream.
-func (m *meta) encode() []byte {
- buf := make([]byte, historyMetaSize)
- buf[0] = m.version
- copy(buf[1:1+common.HashLength], m.parent.Bytes())
- copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes())
- binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block)
- return buf[:]
-}
-
-// decode unpacks the meta object from byte stream.
-func (m *meta) decode(blob []byte) error {
- if len(blob) < 1 {
- return errors.New("no version tag")
- }
- switch blob[0] {
- case stateHistoryV0, stateHistoryV1:
- if len(blob) != historyMetaSize {
- return fmt.Errorf("invalid state history meta, len: %d", len(blob))
- }
- m.version = blob[0]
- m.parent = common.BytesToHash(blob[1 : 1+common.HashLength])
- m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength])
- m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize])
- return nil
- default:
- return fmt.Errorf("unknown version %d", blob[0])
- }
-}
-
-// history represents a set of state changes belong to a block along with
-// the metadata including the state roots involved in the state transition.
-// State history objects in disk are linked with each other by a unique id
-// (8-bytes integer), the oldest state history object can be pruned on demand
-// in order to control the storage size.
-type history struct {
- meta *meta // Meta data of history
- accounts map[common.Address][]byte // Account data keyed by its address hash
- accountList []common.Address // Sorted account hash list
- storages map[common.Address]map[common.Hash][]byte // Storage data keyed by its address hash and slot hash
- storageList map[common.Address][]common.Hash // Sorted slot hash list
-}
-
-// newHistory constructs the state history object with provided state change set.
-func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *history {
- var (
- accountList = slices.SortedFunc(maps.Keys(accounts), common.Address.Cmp)
- storageList = make(map[common.Address][]common.Hash)
- )
- for addr, slots := range storages {
- storageList[addr] = slices.SortedFunc(maps.Keys(slots), common.Hash.Cmp)
- }
- version := historyVersion
- if !rawStorageKey {
- version = stateHistoryV0
- }
- return &history{
- meta: &meta{
- version: version,
- parent: parent,
- root: root,
- block: block,
- },
- accounts: accounts,
- accountList: accountList,
- storages: storages,
- storageList: storageList,
- }
-}
-
-// stateSet returns the state set, keyed by the hash of the account address
-// and the hash of the storage slot key.
-func (h *history) stateSet() (map[common.Hash][]byte, map[common.Hash]map[common.Hash][]byte) {
- var (
- accounts = make(map[common.Hash][]byte)
- storages = make(map[common.Hash]map[common.Hash][]byte)
- )
- for addr, blob := range h.accounts {
- addrHash := crypto.Keccak256Hash(addr.Bytes())
- accounts[addrHash] = blob
-
- storage, exist := h.storages[addr]
- if !exist {
- continue
- }
- if h.meta.version == stateHistoryV0 {
- storages[addrHash] = storage
- } else {
- subset := make(map[common.Hash][]byte)
- for key, slot := range storage {
- subset[crypto.Keccak256Hash(key.Bytes())] = slot
- }
- storages[addrHash] = subset
- }
- }
- return accounts, storages
-}
-
-// encode serializes the state history and returns four byte streams represent
-// concatenated account/storage data, account/storage indexes respectively.
-func (h *history) encode() ([]byte, []byte, []byte, []byte) {
- var (
- slotNumber uint32 // the number of processed slots
- accountData []byte // the buffer for concatenated account data
- storageData []byte // the buffer for concatenated storage data
- accountIndexes []byte // the buffer for concatenated account index
- storageIndexes []byte // the buffer for concatenated storage index
- )
- for _, addr := range h.accountList {
- accIndex := accountIndex{
- address: addr,
- length: uint8(len(h.accounts[addr])),
- offset: uint32(len(accountData)),
- }
- slots, exist := h.storages[addr]
- if exist {
- // Encode storage slots in order
- for _, slotHash := range h.storageList[addr] {
- sIndex := slotIndex{
- id: slotHash,
- length: uint8(len(slots[slotHash])),
- offset: uint32(len(storageData)),
- }
- storageData = append(storageData, slots[slotHash]...)
- storageIndexes = append(storageIndexes, sIndex.encode()...)
- }
- // Fill up the storage meta in account index
- accIndex.storageOffset = slotNumber
- accIndex.storageSlots = uint32(len(slots))
- slotNumber += uint32(len(slots))
- }
- accountData = append(accountData, h.accounts[addr]...)
- accountIndexes = append(accountIndexes, accIndex.encode()...)
- }
- return accountData, storageData, accountIndexes, storageIndexes
-}
-
-// decoder wraps the byte streams for decoding with extra meta fields.
-type decoder struct {
- accountData []byte // the buffer for concatenated account data
- storageData []byte // the buffer for concatenated storage data
- accountIndexes []byte // the buffer for concatenated account index
- storageIndexes []byte // the buffer for concatenated storage index
-
- lastAccount *common.Address // the address of last resolved account
- lastAccountRead uint32 // the read-cursor position of account data
- lastSlotIndexRead uint32 // the read-cursor position of storage slot index
- lastSlotDataRead uint32 // the read-cursor position of storage slot data
-}
-
-// verify validates the provided byte streams for decoding state history. A few
-// checks will be performed to quickly detect data corruption. The byte stream
-// is regarded as corrupted if:
-//
-// - account indexes buffer is empty(empty state set is invalid)
-// - account indexes/storage indexer buffer is not aligned
-//
-// note, these situations are allowed:
-//
-// - empty account data: all accounts were not present
-// - empty storage set: no slots are modified
-func (r *decoder) verify() error {
- if len(r.accountIndexes)%accountIndexSize != 0 || len(r.accountIndexes) == 0 {
- return fmt.Errorf("invalid account index, len: %d", len(r.accountIndexes))
- }
- if len(r.storageIndexes)%slotIndexSize != 0 {
- return fmt.Errorf("invalid storage index, len: %d", len(r.storageIndexes))
- }
- return nil
-}
-
-// readAccount parses the account from the byte stream with specified position.
-func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
- // Decode account index from the index byte stream.
- var index accountIndex
- if (pos+1)*accountIndexSize > len(r.accountIndexes) {
- return accountIndex{}, nil, errors.New("account data buffer is corrupted")
- }
- index.decode(r.accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize])
-
- // Perform validation before parsing account data, ensure
- // - account is sorted in order in byte stream
- // - account data is strictly encoded with no gap inside
- // - account data is not out-of-slice
- if r.lastAccount != nil { // zero address is possible
- if bytes.Compare(r.lastAccount.Bytes(), index.address.Bytes()) >= 0 {
- return accountIndex{}, nil, errors.New("account is not in order")
- }
- }
- if index.offset != r.lastAccountRead {
- return accountIndex{}, nil, errors.New("account data buffer is gaped")
- }
- last := index.offset + uint32(index.length)
- if uint32(len(r.accountData)) < last {
- return accountIndex{}, nil, errors.New("account data buffer is corrupted")
- }
- data := r.accountData[index.offset:last]
-
- r.lastAccount = &index.address
- r.lastAccountRead = last
-
- return index, data, nil
-}
-
-// readStorage parses the storage slots from the byte stream with specified account.
-func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
- var (
- last *common.Hash
- count = int(accIndex.storageSlots)
- list = make([]common.Hash, 0, count)
- storage = make(map[common.Hash][]byte, count)
- )
- for j := 0; j < count; j++ {
- var (
- index slotIndex
- start = (accIndex.storageOffset + uint32(j)) * uint32(slotIndexSize)
- end = (accIndex.storageOffset + uint32(j+1)) * uint32(slotIndexSize)
- )
- // Perform validation before parsing storage slot data, ensure
- // - slot index is not out-of-slice
- // - slot data is not out-of-slice
- // - slot is sorted in order in byte stream
- // - slot indexes is strictly encoded with no gap inside
- // - slot data is strictly encoded with no gap inside
- if start != r.lastSlotIndexRead {
- return nil, nil, errors.New("storage index buffer is gapped")
- }
- if uint32(len(r.storageIndexes)) < end {
- return nil, nil, errors.New("storage index buffer is corrupted")
- }
- index.decode(r.storageIndexes[start:end])
-
- if last != nil {
- if bytes.Compare(last.Bytes(), index.id.Bytes()) >= 0 {
- return nil, nil, fmt.Errorf("storage slot is not in order, last: %x, current: %x", *last, index.id)
- }
- }
- if index.offset != r.lastSlotDataRead {
- return nil, nil, errors.New("storage data buffer is gapped")
- }
- sEnd := index.offset + uint32(index.length)
- if uint32(len(r.storageData)) < sEnd {
- return nil, nil, errors.New("storage data buffer is corrupted")
- }
- storage[index.id] = r.storageData[r.lastSlotDataRead:sEnd]
- list = append(list, index.id)
-
- last = &index.id
- r.lastSlotIndexRead = end
- r.lastSlotDataRead = sEnd
- }
- return list, storage, nil
-}
-
-// decode deserializes the account and storage data from the provided byte stream.
-func (h *history) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error {
- var (
- count = len(accountIndexes) / accountIndexSize
- accounts = make(map[common.Address][]byte, count)
- storages = make(map[common.Address]map[common.Hash][]byte)
- accountList = make([]common.Address, 0, count)
- storageList = make(map[common.Address][]common.Hash)
-
- r = &decoder{
- accountData: accountData,
- storageData: storageData,
- accountIndexes: accountIndexes,
- storageIndexes: storageIndexes,
- }
- )
- if err := r.verify(); err != nil {
- return err
- }
- for i := 0; i < count; i++ {
- // Resolve account first
- accIndex, accData, err := r.readAccount(i)
- if err != nil {
- return err
- }
- accounts[accIndex.address] = accData
- accountList = append(accountList, accIndex.address)
-
- // Resolve storage slots
- slotList, slotData, err := r.readStorage(accIndex)
- if err != nil {
- return err
- }
- if len(slotList) > 0 {
- storageList[accIndex.address] = slotList
- storages[accIndex.address] = slotData
- }
- }
- h.accounts = accounts
- h.accountList = accountList
- h.storages = storages
- h.storageList = storageList
- return nil
-}
-
-// readHistory reads and decodes the state history object by the given id.
-func readHistory(reader ethdb.AncientReader, id uint64) (*history, error) {
- mData, accountIndexes, storageIndexes, accountData, storageData, err := rawdb.ReadStateHistory(reader, id)
- if err != nil {
- return nil, err
- }
- var m meta
- if err := m.decode(mData); err != nil {
- return nil, err
- }
- h := history{meta: &m}
- if err := h.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
- return nil, err
- }
- return &h, nil
-}
-
-// readHistories reads and decodes a list of state histories with the specific
-// history range.
-func readHistories(freezer ethdb.AncientReader, start uint64, count uint64) ([]*history, error) {
- var histories []*history
- metaList, aIndexList, sIndexList, aDataList, sDataList, err := rawdb.ReadStateHistoryList(freezer, start, count)
- if err != nil {
- return nil, err
- }
- for i := 0; i < len(metaList); i++ {
- var m meta
- if err := m.decode(metaList[i]); err != nil {
- return nil, err
- }
- h := history{meta: &m}
- if err := h.decode(aDataList[i], sDataList[i], aIndexList[i], sIndexList[i]); err != nil {
- return nil, err
- }
- histories = append(histories, &h)
- }
- return histories, nil
-}
-
-// writeHistory persists the state history with the provided state set.
-func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
- // Short circuit if state set is not available.
- if dl.states == nil {
- return errors.New("state change set is not available")
- }
- var (
- start = time.Now()
- history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin, dl.states.rawStorageKey)
- )
- accountData, storageData, accountIndex, storageIndex := history.encode()
- dataSize := common.StorageSize(len(accountData) + len(storageData))
- indexSize := common.StorageSize(len(accountIndex) + len(storageIndex))
-
- // Write history data into five freezer table respectively.
- if err := rawdb.WriteStateHistory(writer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData); err != nil {
- return err
- }
- historyDataBytesMeter.Mark(int64(dataSize))
- historyIndexBytesMeter.Mark(int64(indexSize))
- historyBuildTimeMeter.UpdateSince(start)
- log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "elapsed", common.PrettyDuration(time.Since(start)))
-
- return nil
-}
-
-// checkHistories retrieves a batch of meta objects with the specified range
-// and performs the callback on each item.
-func checkHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error {
- for count > 0 {
- number := count
- if number > 10000 {
- number = 10000 // split the big read into small chunks
- }
- blobs, err := rawdb.ReadStateHistoryMetaList(reader, start, number)
- if err != nil {
- return err
- }
- for _, blob := range blobs {
- var dec meta
- if err := dec.decode(blob); err != nil {
- return err
- }
- if err := check(&dec); err != nil {
- return err
- }
- }
- count -= uint64(len(blobs))
- start += uint64(len(blobs))
- }
- return nil
-}
-
-// truncateFromHead removes the extra state histories from the head with the given
-// parameters. It returns the number of items removed from the head.
-func truncateFromHead(db ethdb.Batcher, store ethdb.AncientStore, nhead uint64) (int, error) {
+// truncateFromHead removes excess elements from the head of the freezer based
+// on the given parameters. It returns the number of items that were removed.
+func truncateFromHead(store ethdb.AncientStore, nhead uint64) (int, error) {
ohead, err := store.Ancients()
if err != nil {
return 0, err
@@ -606,40 +40,28 @@ func truncateFromHead(db ethdb.Batcher, store ethdb.AncientStore, nhead uint64)
if err != nil {
return 0, err
}
- // Ensure that the truncation target falls within the specified range.
+ log.Info("Truncating from head", "ohead", ohead, "tail", otail, "nhead", nhead)
+
+ // Ensure that the truncation target falls within the valid range.
if ohead < nhead || nhead < otail {
- return 0, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", otail, ohead, nhead)
+ return 0, fmt.Errorf("%w, tail: %d, head: %d, target: %d", errHeadTruncationOutOfRange, otail, ohead, nhead)
}
// Short circuit if nothing to truncate.
if ohead == nhead {
return 0, nil
}
- // Load the meta objects in range [nhead+1, ohead]
- blobs, err := rawdb.ReadStateHistoryMetaList(store, nhead+1, ohead-nhead)
- if err != nil {
- return 0, err
- }
- batch := db.NewBatch()
- for _, blob := range blobs {
- var m meta
- if err := m.decode(blob); err != nil {
- return 0, err
- }
- rawdb.DeleteStateID(batch, m.root)
- }
- if err := batch.Write(); err != nil {
- return 0, err
- }
ohead, err = store.TruncateHead(nhead)
if err != nil {
return 0, err
}
+ // Associated root->id mappings are left in the database and wait
+ // for overwriting.
return int(ohead - nhead), nil
}
-// truncateFromTail removes the extra state histories from the tail with the given
-// parameters. It returns the number of items removed from the tail.
-func truncateFromTail(db ethdb.Batcher, store ethdb.AncientStore, ntail uint64) (int, error) {
+// truncateFromTail removes excess elements from the end of the freezer based
+// on the given parameters. It returns the number of items that were removed.
+func truncateFromTail(store ethdb.AncientStore, ntail uint64) (int, error) {
ohead, err := store.Ancients()
if err != nil {
return 0, err
@@ -648,33 +70,18 @@ func truncateFromTail(db ethdb.Batcher, store ethdb.AncientStore, ntail uint64)
if err != nil {
return 0, err
}
- // Ensure that the truncation target falls within the specified range.
+ // Ensure that the truncation target falls within the valid range.
if otail > ntail || ntail > ohead {
- return 0, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", otail, ohead, ntail)
+ return 0, fmt.Errorf("%w, tail: %d, head: %d, target: %d", errTailTruncationOutOfRange, otail, ohead, ntail)
}
// Short circuit if nothing to truncate.
if otail == ntail {
return 0, nil
}
- // Load the meta objects in range [otail+1, ntail]
- blobs, err := rawdb.ReadStateHistoryMetaList(store, otail+1, ntail-otail)
- if err != nil {
- return 0, err
- }
- batch := db.NewBatch()
- for _, blob := range blobs {
- var m meta
- if err := m.decode(blob); err != nil {
- return 0, err
- }
- rawdb.DeleteStateID(batch, m.root)
- }
- if err := batch.Write(); err != nil {
- return 0, err
- }
otail, err = store.TruncateTail(ntail)
if err != nil {
return 0, err
}
+ // Associated root->id mappings are left in the database.
return int(ntail - otail), nil
}
diff --git a/triedb/pathdb/history_index_test.go b/triedb/pathdb/history_index_test.go
index 7b24b86fd6..c83c33ffbd 100644
--- a/triedb/pathdb/history_index_test.go
+++ b/triedb/pathdb/history_index_test.go
@@ -180,7 +180,7 @@ func TestBatchIndexerWrite(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
batch = newBatchIndexer(db, false)
- histories = makeHistories(10)
+ histories = makeStateHistories(10)
)
for i, h := range histories {
if err := batch.process(h, uint64(i+1)); err != nil {
@@ -257,7 +257,7 @@ func TestBatchIndexerDelete(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
bw = newBatchIndexer(db, false)
- histories = makeHistories(10)
+ histories = makeStateHistories(10)
)
// Index histories
for i, h := range histories {
diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go
index 054d43e946..14b9af5367 100644
--- a/triedb/pathdb/history_indexer.go
+++ b/triedb/pathdb/history_indexer.go
@@ -93,7 +93,7 @@ func newBatchIndexer(db ethdb.KeyValueStore, delete bool) *batchIndexer {
// process iterates through the accounts and their associated storage slots in the
// state history, tracking the mapping between state and history IDs.
-func (b *batchIndexer) process(h *history, historyID uint64) error {
+func (b *batchIndexer) process(h *stateHistory, historyID uint64) error {
for _, address := range h.accountList {
addrHash := crypto.Keccak256Hash(address.Bytes())
b.counter += 1
@@ -241,7 +241,7 @@ func indexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.Ancient
}
return fmt.Errorf("history indexing is out of order, last: %s, requested: %d", last, historyID)
}
- h, err := readHistory(freezer, historyID)
+ h, err := readStateHistory(freezer, historyID)
if err != nil {
return err
}
@@ -271,7 +271,7 @@ func unindexSingle(historyID uint64, db ethdb.KeyValueStore, freezer ethdb.Ancie
}
return fmt.Errorf("history unindexing is out of order, last: %s, requested: %d", last, historyID)
}
- h, err := readHistory(freezer, historyID)
+ h, err := readStateHistory(freezer, historyID)
if err != nil {
return err
}
@@ -524,7 +524,7 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID
if count > historyReadBatch {
count = historyReadBatch
}
- histories, err := readHistories(i.freezer, current, count)
+ histories, err := readStateHistories(i.freezer, current, count)
if err != nil {
// The history read might fall if the history is truncated from
// head due to revert operation.
@@ -543,12 +543,10 @@ func (i *indexIniter) index(done chan struct{}, interrupt *atomic.Int32, lastID
logged = time.Now()
var (
- left = lastID - current + 1
- done = current - beginID
- speed = done/uint64(time.Since(start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
+ left = lastID - current + 1
+ done = current - beginID
)
- // Override the ETA if larger than the largest until now
- eta := time.Duration(left/speed) * time.Millisecond
+ eta := common.CalculateETA(done, left, time.Since(start))
log.Info("Indexing state history", "processed", done, "left", left, "elapsed", common.PrettyDuration(time.Since(start)), "eta", common.PrettyDuration(eta))
}
}
@@ -598,14 +596,17 @@ func checkVersion(disk ethdb.KeyValueStore) {
if err == nil && m.Version == stateIndexVersion {
return
}
- // TODO(rjl493456442) would be better to group them into a batch.
- rawdb.DeleteStateHistoryIndexMetadata(disk)
- rawdb.DeleteStateHistoryIndex(disk)
-
version := "unknown"
if err == nil {
version = fmt.Sprintf("%d", m.Version)
}
+
+ batch := disk.NewBatch()
+ rawdb.DeleteStateHistoryIndexMetadata(batch)
+ rawdb.DeleteStateHistoryIndex(batch)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to purge state history index", "err", err)
+ }
log.Info("Cleaned up obsolete state history index", "version", version, "want", stateIndexVersion)
}
diff --git a/triedb/pathdb/history_indexer_test.go b/triedb/pathdb/history_indexer_test.go
index abfcafc945..96c87ccb1b 100644
--- a/triedb/pathdb/history_indexer_test.go
+++ b/triedb/pathdb/history_indexer_test.go
@@ -32,7 +32,7 @@ func TestHistoryIndexerShortenDeadlock(t *testing.T) {
freezer, _ := rawdb.NewStateFreezer(t.TempDir(), false, false)
defer freezer.Close()
- histories := makeHistories(100)
+ histories := makeStateHistories(100)
for i, h := range histories {
accountData, storageData, accountIndex, storageIndex := h.encode()
rawdb.WriteStateHistory(freezer, uint64(i+1), h.meta.encode(), accountIndex, storageIndex, accountData, storageData)
diff --git a/triedb/pathdb/history_inspect.go b/triedb/pathdb/history_inspect.go
index 9458e2478b..9b4eea27b4 100644
--- a/triedb/pathdb/history_inspect.go
+++ b/triedb/pathdb/history_inspect.go
@@ -61,7 +61,7 @@ func sanitizeRange(start, end uint64, freezer ethdb.AncientReader) (uint64, uint
return first, last, nil
}
-func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory func(*history, *HistoryStats)) (*HistoryStats, error) {
+func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory func(*stateHistory, *HistoryStats)) (*HistoryStats, error) {
var (
stats = &HistoryStats{}
init = time.Now()
@@ -74,7 +74,7 @@ func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory fu
for id := start; id <= end; id += 1 {
// The entire history object is decoded, although it's unnecessary for
// account inspection. TODO(rjl493456442) optimization is worthwhile.
- h, err := readHistory(freezer, id)
+ h, err := readStateHistory(freezer, id)
if err != nil {
return nil, err
}
@@ -98,7 +98,7 @@ func inspectHistory(freezer ethdb.AncientReader, start, end uint64, onHistory fu
// accountHistory inspects the account history within the range.
func accountHistory(freezer ethdb.AncientReader, address common.Address, start, end uint64) (*HistoryStats, error) {
- return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) {
+ return inspectHistory(freezer, start, end, func(h *stateHistory, stats *HistoryStats) {
blob, exists := h.accounts[address]
if !exists {
return
@@ -111,7 +111,7 @@ func accountHistory(freezer ethdb.AncientReader, address common.Address, start,
// storageHistory inspects the storage history within the range.
func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) {
slotHash := crypto.Keccak256Hash(slot.Bytes())
- return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) {
+ return inspectHistory(freezer, start, end, func(h *stateHistory, stats *HistoryStats) {
slots, exists := h.storages[address]
if !exists {
return
@@ -145,11 +145,11 @@ func historyRange(freezer ethdb.AncientReader) (uint64, uint64, error) {
}
last := head - 1
- fh, err := readHistory(freezer, first)
+ fh, err := readStateHistory(freezer, first)
if err != nil {
return 0, 0, err
}
- lh, err := readHistory(freezer, last)
+ lh, err := readStateHistory(freezer, last)
if err != nil {
return 0, 0, err
}
diff --git a/triedb/pathdb/history_reader.go b/triedb/pathdb/history_reader.go
index d0ecdf035f..a11297b3f6 100644
--- a/triedb/pathdb/history_reader.go
+++ b/triedb/pathdb/history_reader.go
@@ -320,11 +320,12 @@ func (r *historyReader) read(state stateIdentQuery, stateID uint64, lastID uint6
tail, err := r.freezer.Tail()
if err != nil {
return nil, err
- }
- // stateID == tail is allowed, as the first history object preserved
- // is tail+1
+ } // firstID = tail+1
+
+ // stateID+1 == firstID is allowed, as all the subsequent state histories
+ // are present with no gap inside.
if stateID < tail {
- return nil, errors.New("historical state has been pruned")
+ return nil, fmt.Errorf("historical state has been pruned, first: %d, state: %d", tail+1, stateID)
}
// To serve the request, all state histories from stateID+1 to lastID
diff --git a/triedb/pathdb/history_reader_test.go b/triedb/pathdb/history_reader_test.go
index 4eb93fb9c9..9028a886ce 100644
--- a/triedb/pathdb/history_reader_test.go
+++ b/triedb/pathdb/history_reader_test.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/internal/testrand"
)
func waitIndexing(db *Database) {
@@ -36,11 +37,29 @@ func waitIndexing(db *Database) {
}
}
-func checkHistoricState(env *tester, root common.Hash, hr *historyReader) error {
+func stateAvail(id uint64, env *tester) bool {
+ if env.db.config.StateHistory == 0 {
+ return true
+ }
+ dl := env.db.tree.bottom()
+ if dl.stateID() <= env.db.config.StateHistory {
+ return true
+ }
+ firstID := dl.stateID() - env.db.config.StateHistory + 1
+
+ return id+1 >= firstID
+}
+
+func checkHistoricalState(env *tester, root common.Hash, id uint64, hr *historyReader) error {
+ if !stateAvail(id, env) {
+ return nil
+ }
+
// Short circuit if the historical state is no longer available
if rawdb.ReadStateID(env.db.diskdb, root) == nil {
- return nil
+ return fmt.Errorf("state not found %d %x", id, root)
}
+
var (
dl = env.db.tree.bottom()
stateID = rawdb.ReadStateID(env.db.diskdb, root)
@@ -124,22 +143,22 @@ func testHistoryReader(t *testing.T, historyLimit uint64) {
defer func() {
maxDiffLayers = 128
}()
- //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true)))
+ // log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true)))
env := newTester(t, historyLimit, false, 64, true, "")
defer env.release()
waitIndexing(env.db)
var (
roots = env.roots
- dRoot = env.db.tree.bottom().rootHash()
- hr = newHistoryReader(env.db.diskdb, env.db.freezer)
+ dl = env.db.tree.bottom()
+ hr = newHistoryReader(env.db.diskdb, env.db.stateFreezer)
)
- for _, root := range roots {
- if root == dRoot {
+ for i, root := range roots {
+ if root == dl.rootHash() {
break
}
- if err := checkHistoricState(env, root, hr); err != nil {
+ if err := checkHistoricalState(env, root, uint64(i+1), hr); err != nil {
t.Fatal(err)
}
}
@@ -148,12 +167,41 @@ func testHistoryReader(t *testing.T, historyLimit uint64) {
env.extend(4)
waitIndexing(env.db)
- for _, root := range roots {
- if root == dRoot {
+ for i, root := range roots {
+ if root == dl.rootHash() {
break
}
- if err := checkHistoricState(env, root, hr); err != nil {
+ if err := checkHistoricalState(env, root, uint64(i+1), hr); err != nil {
t.Fatal(err)
}
}
}
+
+func TestHistoricalStateReader(t *testing.T) {
+ maxDiffLayers = 4
+ defer func() {
+ maxDiffLayers = 128
+ }()
+
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelDebug, true)))
+ env := newTester(t, 0, false, 64, true, "")
+ defer env.release()
+ waitIndexing(env.db)
+
+ // non-canonical state
+ fakeRoot := testrand.Hash()
+ rawdb.WriteStateID(env.db.diskdb, fakeRoot, 10)
+
+ _, err := env.db.HistoricReader(fakeRoot)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+ t.Log(err)
+
+ // canonical state
+ realRoot := env.roots[9]
+ _, err = env.db.HistoricReader(realRoot)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+}
diff --git a/triedb/pathdb/history_state.go b/triedb/pathdb/history_state.go
new file mode 100644
index 0000000000..3bb69a7f4d
--- /dev/null
+++ b/triedb/pathdb/history_state.go
@@ -0,0 +1,610 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "maps"
+ "slices"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// State history records the state changes involved in executing a block. The
+// state can be reverted to the previous version by applying the associated
+// history object (state reverse diff). State history objects are kept to
+// guarantee that the system can perform state rollbacks in case of deep reorg.
+//
+// Each state transition will generate a state history object. Note that not
+// every block has a corresponding state history object. If a block performs
+// no state changes whatsoever, no history is created for it. Each state history
+// will have a sequentially increasing number acting as its unique identifier.
+//
+// The state history is written to disk (ancient store) when the corresponding
+// diff layer is merged into the disk layer. At the same time, system can prune
+// the oldest histories according to config.
+//
+// Disk State
+// ^
+// |
+// +------------+ +---------+ +---------+ +---------+
+// | Init State |---->| State 1 |---->| ... |---->| State n |
+// +------------+ +---------+ +---------+ +---------+
+//
+// +-----------+ +------+ +-----------+
+// | History 1 |----> | ... |---->| History n |
+// +-----------+ +------+ +-----------+
+//
+// # Rollback
+//
+// If the system wants to roll back to a previous state n, it needs to ensure
+// all history objects from n+1 up to the current disk layer are existent. The
+// history objects are applied to the state in reverse order, starting from the
+// current disk layer.
+
+const (
+ accountIndexSize = common.AddressLength + 13 // The length of encoded account index
+ slotIndexSize = common.HashLength + 5 // The length of encoded slot index
+ historyMetaSize = 9 + 2*common.HashLength // The length of encoded history meta
+
+ stateHistoryV0 = uint8(0) // initial version of state history structure
+ stateHistoryV1 = uint8(1) // use the storage slot raw key as the identifier instead of the key hash
+ historyVersion = stateHistoryV1 // the default state history version
+)
+
+// Each state history entry consists of five elements:
+//
+// # metadata
+// This object contains a few meta fields, such as the associated state root,
+// block number, version tag and so on. This object may contain an extra
+// accountHash list which means the storage changes belong to these accounts
+// are not complete due to large contract destruction. The incomplete history
+// can not be used for rollback and serving archive state request.
+//
+// # account index
+// This object contains some index information of account. For example, offset
+// and length indicate the location of the data belonging to the account. Besides,
+// storageOffset and storageSlots indicate the storage modification location
+// belonging to the account.
+//
+// The size of each account index is *fixed*, and all indexes are sorted
+// lexicographically. Thus binary search can be performed to quickly locate a
+// specific account.
+//
+// # account data
+// Account data is a concatenated byte stream composed of all account data.
+// The account data can be solved by the offset and length info indicated
+// by corresponding account index.
+//
+// fixed size
+// ^ ^
+// / \
+// +-----------------+-----------------+----------------+-----------------+
+// | Account index 1 | Account index 2 | ... | Account index N |
+// +-----------------+-----------------+----------------+-----------------+
+// |
+// | length
+// offset |----------------+
+// v v
+// +----------------+----------------+----------------+----------------+
+// | Account data 1 | Account data 2 | ... | Account data N |
+// +----------------+----------------+----------------+----------------+
+//
+// # storage index
+// This object is similar to the account index. It's also fixed size and contains
+// the location info of storage slot data.
+//
+// # storage data
+// Storage data is a concatenated byte stream composed of all storage slot data.
+// The storage slot data can be solved by the location info indicated by
+// corresponding account index and storage slot index.
+//
+// fixed size
+// ^ ^
+// / \
+// +-----------------+-----------------+----------------+-----------------+
+// | Account index 1 | Account index 2 | ... | Account index N |
+// +-----------------+-----------------+----------------+-----------------+
+// |
+// | storage slots
+// storage offset |-----------------------------------------------------+
+// v v
+// +-----------------+-----------------+-----------------+
+// | storage index 1 | storage index 2 | storage index 3 |
+// +-----------------+-----------------+-----------------+
+// | length
+// offset |-------------+
+// v v
+// +-------------+
+// | slot data 1 |
+// +-------------+
+
+// accountIndex describes the metadata belonging to an account.
+type accountIndex struct {
+ address common.Address // The address of account
+ length uint8 // The length of account data, size limited by 255
+ offset uint32 // The offset of item in account data table
+ storageOffset uint32 // The offset of storage index in storage index table
+ storageSlots uint32 // The number of mutated storage slots belonging to the account
+}
+
+// encode packs account index into byte stream.
+func (i *accountIndex) encode() []byte {
+ var buf [accountIndexSize]byte
+ copy(buf[:], i.address.Bytes())
+ buf[common.AddressLength] = i.length
+ binary.BigEndian.PutUint32(buf[common.AddressLength+1:], i.offset)
+ binary.BigEndian.PutUint32(buf[common.AddressLength+5:], i.storageOffset)
+ binary.BigEndian.PutUint32(buf[common.AddressLength+9:], i.storageSlots)
+ return buf[:]
+}
+
+// decode unpacks account index from byte stream.
+func (i *accountIndex) decode(blob []byte) {
+ i.address = common.BytesToAddress(blob[:common.AddressLength])
+ i.length = blob[common.AddressLength]
+ i.offset = binary.BigEndian.Uint32(blob[common.AddressLength+1:])
+ i.storageOffset = binary.BigEndian.Uint32(blob[common.AddressLength+5:])
+ i.storageSlots = binary.BigEndian.Uint32(blob[common.AddressLength+9:])
+}
+
+// slotIndex describes the metadata belonging to a storage slot.
+type slotIndex struct {
+ // the identifier of the storage slot. Specifically
+ // in v0, it's the hash of the raw storage slot key (32 bytes);
+ // in v1, it's the raw storage slot key (32 bytes);
+ id common.Hash
+ length uint8 // The length of storage slot, up to 32 bytes defined in protocol
+ offset uint32 // The offset of item in storage slot data table
+}
+
+// encode packs slot index into byte stream.
+func (i *slotIndex) encode() []byte {
+ var buf [slotIndexSize]byte
+ copy(buf[:common.HashLength], i.id.Bytes())
+ buf[common.HashLength] = i.length
+ binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
+ return buf[:]
+}
+
+// decode unpacks the slot index from the byte stream.
+func (i *slotIndex) decode(blob []byte) {
+ i.id = common.BytesToHash(blob[:common.HashLength])
+ i.length = blob[common.HashLength]
+ i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
+}
+
+// meta describes the meta data of state history object.
+type meta struct {
+ version uint8 // version tag of history object
+ parent common.Hash // prev-state root before the state transition
+ root common.Hash // post-state root after the state transition
+ block uint64 // associated block number
+}
+
+// encode packs the meta object into byte stream.
+func (m *meta) encode() []byte {
+ buf := make([]byte, historyMetaSize)
+ buf[0] = m.version
+ copy(buf[1:1+common.HashLength], m.parent.Bytes())
+ copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes())
+ binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block)
+ return buf[:]
+}
+
+// decode unpacks the meta object from byte stream.
+func (m *meta) decode(blob []byte) error {
+ if len(blob) < 1 {
+ return errors.New("no version tag")
+ }
+ switch blob[0] {
+ case stateHistoryV0, stateHistoryV1:
+ if len(blob) != historyMetaSize {
+ return fmt.Errorf("invalid state history meta, len: %d", len(blob))
+ }
+ m.version = blob[0]
+ m.parent = common.BytesToHash(blob[1 : 1+common.HashLength])
+ m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength])
+ m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize])
+ return nil
+ default:
+ return fmt.Errorf("unknown version %d", blob[0])
+ }
+}
+
+// stateHistory represents a set of state changes belong to a block along with
+// the metadata including the state roots involved in the state transition.
+//
+// State history objects in disk are linked with each other by a unique id
+// (8-bytes integer), the oldest state history object can be pruned on demand
+// in order to control the storage size.
+type stateHistory struct {
+ meta *meta // Meta data of history
+ accounts map[common.Address][]byte // Account data keyed by its address
+ accountList []common.Address // Sorted list of account addresses
+ storages map[common.Address]map[common.Hash][]byte // Storage data keyed by its address and slot identifier (key hash in v0, raw key in v1)
+ storageList map[common.Address][]common.Hash // Sorted list of slot identifiers per account
+}
+
+// newStateHistory constructs the state history object with provided states.
+func newStateHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *stateHistory {
+ var (
+ accountList = slices.SortedFunc(maps.Keys(accounts), common.Address.Cmp)
+ storageList = make(map[common.Address][]common.Hash)
+ )
+ for addr, slots := range storages {
+ storageList[addr] = slices.SortedFunc(maps.Keys(slots), common.Hash.Cmp)
+ }
+ version := historyVersion
+ if !rawStorageKey {
+ version = stateHistoryV0
+ }
+ return &stateHistory{
+ meta: &meta{
+ version: version,
+ parent: parent,
+ root: root,
+ block: block,
+ },
+ accounts: accounts,
+ accountList: accountList,
+ storages: storages,
+ storageList: storageList,
+ }
+}
+
+// stateSet returns the state set, keyed by the hash of the account address
+// and the hash of the storage slot key.
+func (h *stateHistory) stateSet() (map[common.Hash][]byte, map[common.Hash]map[common.Hash][]byte) {
+ var (
+ accounts = make(map[common.Hash][]byte)
+ storages = make(map[common.Hash]map[common.Hash][]byte)
+ )
+ for addr, blob := range h.accounts {
+ addrHash := crypto.Keccak256Hash(addr.Bytes())
+ accounts[addrHash] = blob
+
+ storage, exist := h.storages[addr]
+ if !exist {
+ continue
+ }
+ if h.meta.version == stateHistoryV0 {
+ storages[addrHash] = storage
+ } else {
+ subset := make(map[common.Hash][]byte)
+ for key, slot := range storage {
+ subset[crypto.Keccak256Hash(key.Bytes())] = slot
+ }
+ storages[addrHash] = subset
+ }
+ }
+ return accounts, storages
+}
+
+// encode serializes the state history and returns four byte streams represent
+// concatenated account/storage data, account/storage indexes respectively.
+func (h *stateHistory) encode() ([]byte, []byte, []byte, []byte) {
+ var (
+ slotNumber uint32 // the number of processed slots
+ accountData []byte // the buffer for concatenated account data
+ storageData []byte // the buffer for concatenated storage data
+ accountIndexes []byte // the buffer for concatenated account index
+ storageIndexes []byte // the buffer for concatenated storage index
+ )
+ for _, addr := range h.accountList {
+ accIndex := accountIndex{
+ address: addr,
+ length: uint8(len(h.accounts[addr])),
+ offset: uint32(len(accountData)),
+ }
+ slots, exist := h.storages[addr]
+ if exist {
+ // Encode storage slots in order
+ for _, slotHash := range h.storageList[addr] {
+ sIndex := slotIndex{
+ id: slotHash,
+ length: uint8(len(slots[slotHash])),
+ offset: uint32(len(storageData)),
+ }
+ storageData = append(storageData, slots[slotHash]...)
+ storageIndexes = append(storageIndexes, sIndex.encode()...)
+ }
+ // Fill up the storage meta in account index
+ accIndex.storageOffset = slotNumber
+ accIndex.storageSlots = uint32(len(slots))
+ slotNumber += uint32(len(slots))
+ }
+ accountData = append(accountData, h.accounts[addr]...)
+ accountIndexes = append(accountIndexes, accIndex.encode()...)
+ }
+ return accountData, storageData, accountIndexes, storageIndexes
+}
+
+// decoder wraps the byte streams for decoding with extra meta fields.
+type decoder struct {
+ accountData []byte // the buffer for concatenated account data
+ storageData []byte // the buffer for concatenated storage data
+ accountIndexes []byte // the buffer for concatenated account index
+ storageIndexes []byte // the buffer for concatenated storage index
+
+ lastAccount *common.Address // the address of last resolved account
+ lastAccountRead uint32 // the read-cursor position of account data
+ lastSlotIndexRead uint32 // the read-cursor position of storage slot index
+ lastSlotDataRead uint32 // the read-cursor position of storage slot data
+}
+
+// verify validates the provided byte streams for decoding state history. A few
+// checks will be performed to quickly detect data corruption. The byte stream
+// is regarded as corrupted if:
+//
+// - account indexes buffer is empty (an empty state set is invalid)
+// - account indexes/storage indexes buffer is not aligned
+//
+// note, these situations are allowed:
+//
+// - empty account data: all accounts were not present
+// - empty storage set: no slots are modified
+func (r *decoder) verify() error {
+ if len(r.accountIndexes)%accountIndexSize != 0 || len(r.accountIndexes) == 0 {
+ return fmt.Errorf("invalid account index, len: %d", len(r.accountIndexes))
+ }
+ if len(r.storageIndexes)%slotIndexSize != 0 {
+ return fmt.Errorf("invalid storage index, len: %d", len(r.storageIndexes))
+ }
+ return nil
+}
+
+// readAccount parses the account from the byte stream with specified position.
+func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
+ // Decode account index from the index byte stream.
+ var index accountIndex
+ if (pos+1)*accountIndexSize > len(r.accountIndexes) {
+ return accountIndex{}, nil, errors.New("account data buffer is corrupted")
+ }
+ index.decode(r.accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize])
+
+ // Perform validation before parsing account data, ensure
+ // - account is sorted in order in byte stream
+ // - account data is strictly encoded with no gap inside
+ // - account data is not out-of-slice
+ if r.lastAccount != nil { // zero address is possible
+ if bytes.Compare(r.lastAccount.Bytes(), index.address.Bytes()) >= 0 {
+ return accountIndex{}, nil, errors.New("account is not in order")
+ }
+ }
+ if index.offset != r.lastAccountRead {
+ return accountIndex{}, nil, errors.New("account data buffer is gaped")
+ }
+ last := index.offset + uint32(index.length)
+ if uint32(len(r.accountData)) < last {
+ return accountIndex{}, nil, errors.New("account data buffer is corrupted")
+ }
+ data := r.accountData[index.offset:last]
+
+ r.lastAccount = &index.address
+ r.lastAccountRead = last
+
+ return index, data, nil
+}
+
+// readStorage parses the storage slots from the byte stream with specified account.
+func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
+ var (
+ last *common.Hash
+ count = int(accIndex.storageSlots)
+ list = make([]common.Hash, 0, count)
+ storage = make(map[common.Hash][]byte, count)
+ )
+ for j := 0; j < count; j++ {
+ var (
+ index slotIndex
+ start = (accIndex.storageOffset + uint32(j)) * uint32(slotIndexSize)
+ end = (accIndex.storageOffset + uint32(j+1)) * uint32(slotIndexSize)
+ )
+ // Perform validation before parsing storage slot data, ensure
+ // - slot index is not out-of-slice
+ // - slot data is not out-of-slice
+ // - slot is sorted in order in byte stream
+ // - slot indexes is strictly encoded with no gap inside
+ // - slot data is strictly encoded with no gap inside
+ if start != r.lastSlotIndexRead {
+ return nil, nil, errors.New("storage index buffer is gapped")
+ }
+ if uint32(len(r.storageIndexes)) < end {
+ return nil, nil, errors.New("storage index buffer is corrupted")
+ }
+ index.decode(r.storageIndexes[start:end])
+
+ if last != nil {
+ if bytes.Compare(last.Bytes(), index.id.Bytes()) >= 0 {
+ return nil, nil, fmt.Errorf("storage slot is not in order, last: %x, current: %x", *last, index.id)
+ }
+ }
+ if index.offset != r.lastSlotDataRead {
+ return nil, nil, errors.New("storage data buffer is gapped")
+ }
+ sEnd := index.offset + uint32(index.length)
+ if uint32(len(r.storageData)) < sEnd {
+ return nil, nil, errors.New("storage data buffer is corrupted")
+ }
+ storage[index.id] = r.storageData[r.lastSlotDataRead:sEnd]
+ list = append(list, index.id)
+
+ last = &index.id
+ r.lastSlotIndexRead = end
+ r.lastSlotDataRead = sEnd
+ }
+ return list, storage, nil
+}
+
+// decode deserializes the account and storage data from the provided byte stream.
+func (h *stateHistory) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error {
+ var (
+ count = len(accountIndexes) / accountIndexSize
+ accounts = make(map[common.Address][]byte, count)
+ storages = make(map[common.Address]map[common.Hash][]byte)
+ accountList = make([]common.Address, 0, count)
+ storageList = make(map[common.Address][]common.Hash)
+
+ r = &decoder{
+ accountData: accountData,
+ storageData: storageData,
+ accountIndexes: accountIndexes,
+ storageIndexes: storageIndexes,
+ }
+ )
+ if err := r.verify(); err != nil {
+ return err
+ }
+ for i := 0; i < count; i++ {
+ // Resolve account first
+ accIndex, accData, err := r.readAccount(i)
+ if err != nil {
+ return err
+ }
+ accounts[accIndex.address] = accData
+ accountList = append(accountList, accIndex.address)
+
+ // Resolve storage slots
+ slotList, slotData, err := r.readStorage(accIndex)
+ if err != nil {
+ return err
+ }
+ if len(slotList) > 0 {
+ storageList[accIndex.address] = slotList
+ storages[accIndex.address] = slotData
+ }
+ }
+ h.accounts = accounts
+ h.accountList = accountList
+ h.storages = storages
+ h.storageList = storageList
+ return nil
+}
+
+// readStateHistoryMeta reads the metadata of state history with the specified id.
+func readStateHistoryMeta(reader ethdb.AncientReader, id uint64) (*meta, error) {
+ data := rawdb.ReadStateHistoryMeta(reader, id)
+ if len(data) == 0 {
+ return nil, fmt.Errorf("metadata is not found, %d", id)
+ }
+ var m meta
+ err := m.decode(data)
+ if err != nil {
+ return nil, err
+ }
+ return &m, nil
+}
+
+// readStateHistory reads a single state history record with the specified id.
+func readStateHistory(reader ethdb.AncientReader, id uint64) (*stateHistory, error) {
+ mData, accountIndexes, storageIndexes, accountData, storageData, err := rawdb.ReadStateHistory(reader, id)
+ if err != nil {
+ return nil, err
+ }
+ var m meta
+ if err := m.decode(mData); err != nil {
+ return nil, err
+ }
+ h := stateHistory{meta: &m}
+ if err := h.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
+ return nil, err
+ }
+ return &h, nil
+}
+
+// readStateHistories reads a list of state history records within the specified range.
+func readStateHistories(freezer ethdb.AncientReader, start uint64, count uint64) ([]*stateHistory, error) {
+ var histories []*stateHistory
+ metaList, aIndexList, sIndexList, aDataList, sDataList, err := rawdb.ReadStateHistoryList(freezer, start, count)
+ if err != nil {
+ return nil, err
+ }
+ for i := 0; i < len(metaList); i++ {
+ var m meta
+ if err := m.decode(metaList[i]); err != nil {
+ return nil, err
+ }
+ h := stateHistory{meta: &m}
+ if err := h.decode(aDataList[i], sDataList[i], aIndexList[i], sIndexList[i]); err != nil {
+ return nil, err
+ }
+ histories = append(histories, &h)
+ }
+ return histories, nil
+}
+
+// writeStateHistory persists the state history associated with the given diff layer.
+func writeStateHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
+ // Short circuit if state set is not available.
+ if dl.states == nil {
+ return errors.New("state change set is not available")
+ }
+ var (
+ start = time.Now()
+ history = newStateHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin, dl.states.rawStorageKey)
+ )
+ accountData, storageData, accountIndex, storageIndex := history.encode()
+ dataSize := common.StorageSize(len(accountData) + len(storageData))
+ indexSize := common.StorageSize(len(accountIndex) + len(storageIndex))
+
+ // Write history data into five freezer table respectively.
+ if err := rawdb.WriteStateHistory(writer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData); err != nil {
+ return err
+ }
+ historyDataBytesMeter.Mark(int64(dataSize))
+ historyIndexBytesMeter.Mark(int64(indexSize))
+ historyBuildTimeMeter.UpdateSince(start)
+ log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "elapsed", common.PrettyDuration(time.Since(start)))
+
+ return nil
+}
+
+// checkStateHistories retrieves a batch of metadata objects within the specified
+// range and performs the callback on each item.
+func checkStateHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error {
+ for count > 0 {
+ number := count
+ if number > 10000 {
+ number = 10000 // split the big read into small chunks
+ }
+ blobs, err := rawdb.ReadStateHistoryMetaList(reader, start, number)
+ if err != nil {
+ return err
+ }
+ for _, blob := range blobs {
+ var dec meta
+ if err := dec.decode(blob); err != nil {
+ return err
+ }
+ if err := check(&dec); err != nil {
+ return err
+ }
+ }
+ count -= uint64(len(blobs))
+ start += uint64(len(blobs))
+ }
+ return nil
+}
diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_state_test.go
similarity index 65%
rename from triedb/pathdb/history_test.go
rename to triedb/pathdb/history_state_test.go
index 2928d19d74..4a777111ea 100644
--- a/triedb/pathdb/history_test.go
+++ b/triedb/pathdb/history_state_test.go
@@ -18,6 +18,7 @@ package pathdb
import (
"bytes"
+ "errors"
"fmt"
"reflect"
"testing"
@@ -49,36 +50,36 @@ func randomStateSet(n int) (map[common.Address][]byte, map[common.Address]map[co
return accounts, storages
}
-func makeHistory(rawStorageKey bool) *history {
+func makeStateHistory(rawStorageKey bool) *stateHistory {
accounts, storages := randomStateSet(3)
- return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages, rawStorageKey)
+ return newStateHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages, rawStorageKey)
}
-func makeHistories(n int) []*history {
+func makeStateHistories(n int) []*stateHistory {
var (
parent = types.EmptyRootHash
- result []*history
+ result []*stateHistory
)
for i := 0; i < n; i++ {
root := testrand.Hash()
accounts, storages := randomStateSet(3)
- h := newHistory(root, parent, uint64(i), accounts, storages, false)
+ h := newStateHistory(root, parent, uint64(i), accounts, storages, false)
parent = root
result = append(result, h)
}
return result
}
-func TestEncodeDecodeHistory(t *testing.T) {
- testEncodeDecodeHistory(t, false)
- testEncodeDecodeHistory(t, true)
+func TestEncodeDecodeStateHistory(t *testing.T) {
+ testEncodeDecodeStateHistory(t, false)
+ testEncodeDecodeStateHistory(t, true)
}
-func testEncodeDecodeHistory(t *testing.T, rawStorageKey bool) {
+func testEncodeDecodeStateHistory(t *testing.T, rawStorageKey bool) {
var (
m meta
- dec history
- obj = makeHistory(rawStorageKey)
+ dec stateHistory
+ obj = makeStateHistory(rawStorageKey)
)
// check if meta data can be correctly encode/decode
blob := obj.meta.encode()
@@ -108,7 +109,7 @@ func testEncodeDecodeHistory(t *testing.T, rawStorageKey bool) {
}
}
-func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, id uint64, root common.Hash, exist bool) {
+func checkStateHistory(t *testing.T, freezer ethdb.AncientReader, id uint64, exist bool) {
blob := rawdb.ReadStateHistoryMeta(freezer, id)
if exist && len(blob) == 0 {
t.Fatalf("Failed to load trie history, %d", id)
@@ -116,25 +117,17 @@ func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientRe
if !exist && len(blob) != 0 {
t.Fatalf("Unexpected trie history, %d", id)
}
- if exist && rawdb.ReadStateID(db, root) == nil {
- t.Fatalf("Root->ID mapping is not found, %d", id)
- }
- if !exist && rawdb.ReadStateID(db, root) != nil {
- t.Fatalf("Unexpected root->ID mapping, %d", id)
- }
}
-func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, from, to uint64, roots []common.Hash, exist bool) {
- for i, j := from, 0; i <= to; i, j = i+1, j+1 {
- checkHistory(t, db, freezer, i, roots[j], exist)
+func checkHistoriesInRange(t *testing.T, freezer ethdb.AncientReader, from, to uint64, exist bool) {
+ for i := from; i <= to; i = i + 1 {
+ checkStateHistory(t, freezer, i, exist)
}
}
-func TestTruncateHeadHistory(t *testing.T) {
+func TestTruncateHeadStateHistory(t *testing.T) {
var (
- roots []common.Hash
- hs = makeHistories(10)
- db = rawdb.NewMemoryDatabase()
+ hs = makeStateHistories(10)
freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false, false)
)
defer freezer.Close()
@@ -142,27 +135,23 @@ func TestTruncateHeadHistory(t *testing.T) {
for i := 0; i < len(hs); i++ {
accountData, storageData, accountIndex, storageIndex := hs[i].encode()
rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
- rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
- roots = append(roots, hs[i].meta.root)
}
for size := len(hs); size > 0; size-- {
- pruned, err := truncateFromHead(db, freezer, uint64(size-1))
+ pruned, err := truncateFromHead(freezer, uint64(size-1))
if err != nil {
t.Fatalf("Failed to truncate from head %v", err)
}
if pruned != 1 {
t.Error("Unexpected pruned items", "want", 1, "got", pruned)
}
- checkHistoriesInRange(t, db, freezer, uint64(size), uint64(10), roots[size-1:], false)
- checkHistoriesInRange(t, db, freezer, uint64(1), uint64(size-1), roots[:size-1], true)
+ checkHistoriesInRange(t, freezer, uint64(size), uint64(10), false)
+ checkHistoriesInRange(t, freezer, uint64(1), uint64(size-1), true)
}
}
-func TestTruncateTailHistory(t *testing.T) {
+func TestTruncateTailStateHistory(t *testing.T) {
var (
- roots []common.Hash
- hs = makeHistories(10)
- db = rawdb.NewMemoryDatabase()
+ hs = makeStateHistories(10)
freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false, false)
)
defer freezer.Close()
@@ -170,20 +159,18 @@ func TestTruncateTailHistory(t *testing.T) {
for i := 0; i < len(hs); i++ {
accountData, storageData, accountIndex, storageIndex := hs[i].encode()
rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
- rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
- roots = append(roots, hs[i].meta.root)
}
for newTail := 1; newTail < len(hs); newTail++ {
- pruned, _ := truncateFromTail(db, freezer, uint64(newTail))
+ pruned, _ := truncateFromTail(freezer, uint64(newTail))
if pruned != 1 {
t.Error("Unexpected pruned items", "want", 1, "got", pruned)
}
- checkHistoriesInRange(t, db, freezer, uint64(1), uint64(newTail), roots[:newTail], false)
- checkHistoriesInRange(t, db, freezer, uint64(newTail+1), uint64(10), roots[newTail:], true)
+ checkHistoriesInRange(t, freezer, uint64(1), uint64(newTail), false)
+ checkHistoriesInRange(t, freezer, uint64(newTail+1), uint64(10), true)
}
}
-func TestTruncateTailHistories(t *testing.T) {
+func TestTruncateTailStateHistories(t *testing.T) {
var cases = []struct {
limit uint64
expPruned int
@@ -191,21 +178,29 @@ func TestTruncateTailHistories(t *testing.T) {
minUnpruned uint64
empty bool
}{
+ // history: id [10]
{
- 1, 9, 9, 10, false,
+ limit: 1,
+ expPruned: 9,
+ maxPruned: 9, minUnpruned: 10, empty: false,
},
+ // history: none
{
- 0, 10, 10, 0 /* no meaning */, true,
+ limit: 0,
+ expPruned: 10,
+ empty: true,
},
+ // history: id [1:10]
{
- 10, 0, 0, 1, false,
+ limit: 10,
+ expPruned: 0,
+ maxPruned: 0,
+ minUnpruned: 1,
},
}
for i, c := range cases {
var (
- roots []common.Hash
- hs = makeHistories(10)
- db = rawdb.NewMemoryDatabase()
+ hs = makeStateHistories(10)
freezer, _ = rawdb.NewStateFreezer(t.TempDir()+fmt.Sprintf("%d", i), false, false)
)
defer freezer.Close()
@@ -213,27 +208,23 @@ func TestTruncateTailHistories(t *testing.T) {
for i := 0; i < len(hs); i++ {
accountData, storageData, accountIndex, storageIndex := hs[i].encode()
rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
- rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
- roots = append(roots, hs[i].meta.root)
}
- pruned, _ := truncateFromTail(db, freezer, uint64(10)-c.limit)
+ pruned, _ := truncateFromTail(freezer, uint64(10)-c.limit)
if pruned != c.expPruned {
t.Error("Unexpected pruned items", "want", c.expPruned, "got", pruned)
}
if c.empty {
- checkHistoriesInRange(t, db, freezer, uint64(1), uint64(10), roots, false)
+ checkHistoriesInRange(t, freezer, uint64(1), uint64(10), false)
} else {
- tail := 10 - int(c.limit)
- checkHistoriesInRange(t, db, freezer, uint64(1), c.maxPruned, roots[:tail], false)
- checkHistoriesInRange(t, db, freezer, c.minUnpruned, uint64(10), roots[tail:], true)
+ checkHistoriesInRange(t, freezer, uint64(1), c.maxPruned, false)
+ checkHistoriesInRange(t, freezer, c.minUnpruned, uint64(10), true)
}
}
}
func TestTruncateOutOfRange(t *testing.T) {
var (
- hs = makeHistories(10)
- db = rawdb.NewMemoryDatabase()
+ hs = makeStateHistories(10)
freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false, false)
)
defer freezer.Close()
@@ -241,9 +232,8 @@ func TestTruncateOutOfRange(t *testing.T) {
for i := 0; i < len(hs); i++ {
accountData, storageData, accountIndex, storageIndex := hs[i].encode()
rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
- rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
}
- truncateFromTail(db, freezer, uint64(len(hs)/2))
+ truncateFromTail(freezer, uint64(len(hs)/2))
// Ensure of-out-range truncations are rejected correctly.
head, _ := freezer.Ancients()
@@ -255,20 +245,20 @@ func TestTruncateOutOfRange(t *testing.T) {
expErr error
}{
{0, head, nil}, // nothing to delete
- {0, head + 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, head+1)},
- {0, tail - 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, tail-1)},
+ {0, head + 1, errHeadTruncationOutOfRange},
+ {0, tail - 1, errHeadTruncationOutOfRange},
{1, tail, nil}, // nothing to delete
- {1, head + 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, head+1)},
- {1, tail - 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, tail-1)},
+ {1, head + 1, errTailTruncationOutOfRange},
+ {1, tail - 1, errTailTruncationOutOfRange},
}
for _, c := range cases {
var gotErr error
if c.mode == 0 {
- _, gotErr = truncateFromHead(db, freezer, c.target)
+ _, gotErr = truncateFromHead(freezer, c.target)
} else {
- _, gotErr = truncateFromTail(db, freezer, c.target)
+ _, gotErr = truncateFromTail(freezer, c.target)
}
- if !reflect.DeepEqual(gotErr, c.expErr) {
+ if !errors.Is(gotErr, c.expErr) {
t.Errorf("Unexpected error, want: %v, got: %v", c.expErr, gotErr)
}
}
diff --git a/triedb/pathdb/iterator.go b/triedb/pathdb/iterator.go
index 84ea08ddd3..8ca8247206 100644
--- a/triedb/pathdb/iterator.go
+++ b/triedb/pathdb/iterator.go
@@ -309,7 +309,7 @@ type diskStorageIterator struct {
it ethdb.Iterator
}
-// StorageIterator creates a storage iterator over the persistent state.
+// newDiskStorageIterator creates a storage iterator over the persistent state.
func newDiskStorageIterator(db ethdb.KeyValueStore, account common.Hash, seek common.Hash) StorageIterator {
pos := common.TrimRightZeroes(seek[:])
return &diskStorageIterator{
diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go
index 4639932763..47a632fd37 100644
--- a/triedb/pathdb/journal.go
+++ b/triedb/pathdb/journal.go
@@ -333,7 +333,16 @@ func (db *Database) Journal(root common.Hash) error {
if db.readOnly {
return errDatabaseReadOnly
}
-
+ // Forcibly sync the ancient store before persisting the in-memory layers.
+ // This prevents an edge case where the in-memory layers are persisted
+ // but the ancient store is not properly closed, resulting in recent writes
+ // being lost. After a restart, the ancient store would then be misaligned
+ // with the disk layer, causing data corruption.
+ if db.stateFreezer != nil {
+ if err := db.stateFreezer.SyncAncient(); err != nil {
+ return err
+ }
+ }
// Store the journal into the database and return
var (
file *os.File
diff --git a/triedb/pathdb/reader.go b/triedb/pathdb/reader.go
index b7b18f13f9..842ac0972e 100644
--- a/triedb/pathdb/reader.go
+++ b/triedb/pathdb/reader.go
@@ -207,30 +207,34 @@ type HistoricalStateReader struct {
// HistoricReader constructs a reader for accessing the requested historic state.
func (db *Database) HistoricReader(root common.Hash) (*HistoricalStateReader, error) {
// Bail out if the state history hasn't been fully indexed
- if db.indexer == nil || !db.indexer.inited() {
- return nil, errors.New("state histories haven't been fully indexed yet")
+ if db.stateIndexer == nil || db.stateFreezer == nil {
+ return nil, fmt.Errorf("historical state %x is not available", root)
}
- if db.freezer == nil {
- return nil, errors.New("state histories are not available")
+ if !db.stateIndexer.inited() {
+ return nil, errors.New("state histories haven't been fully indexed yet")
}
- // States at the current disk layer or above are directly accessible via
- // db.StateReader.
- //
- // States older than the current disk layer (including the disk layer
- // itself) are available through historic state access.
+ // - States at the current disk layer or above are directly accessible
+ // via `db.StateReader`.
//
- // Note: the requested state may refer to a stale historic state that has
- // already been pruned. This function does not validate availability, as
- // underlying states may be pruned dynamically. Validity is checked during
- // each actual state retrieval.
+ // - States older than the current disk layer (including the disk layer
+ // itself) are available via `db.HistoricReader`.
id := rawdb.ReadStateID(db.diskdb, root)
if id == nil {
return nil, fmt.Errorf("state %#x is not available", root)
}
+	// Ensure the requested state is canonical; historical states on side chains
+ // are not accessible.
+ meta, err := readStateHistoryMeta(db.stateFreezer, *id+1)
+ if err != nil {
+ return nil, err // e.g., the referred state history has been pruned
+ }
+ if meta.parent != root {
+		return nil, fmt.Errorf("state %#x is not canonical", root)
+ }
return &HistoricalStateReader{
id: *id,
db: db,
- reader: newHistoryReader(db.diskdb, db.freezer),
+ reader: newHistoryReader(db.diskdb, db.stateFreezer),
}, nil
}
diff --git a/triedb/pathdb/states.go b/triedb/pathdb/states.go
index bc638a569e..dc737c3b53 100644
--- a/triedb/pathdb/states.go
+++ b/triedb/pathdb/states.go
@@ -181,7 +181,7 @@ func (s *stateSet) accountList() []common.Hash {
return list
}
-// StorageList returns a sorted list of all storage slot hashes in this state set
+// storageList returns a sorted list of all storage slot hashes in this state set
// for the given account. The returned list will include the hash of deleted
// storage slot.
//
diff --git a/triedb/pathdb/verifier.go b/triedb/pathdb/verifier.go
index 2d6f72925b..a69b10f4f3 100644
--- a/triedb/pathdb/verifier.go
+++ b/triedb/pathdb/verifier.go
@@ -166,20 +166,17 @@ func (stat *generateStats) report() {
// If there's progress on the account trie, estimate the time to finish crawling it
if done := binary.BigEndian.Uint64(stat.head[:8]) / stat.accounts; done > 0 {
var (
- left = (math.MaxUint64 - binary.BigEndian.Uint64(stat.head[:8])) / stat.accounts
- speed = done/uint64(time.Since(stat.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
- eta = time.Duration(left/speed) * time.Millisecond
+ left = (math.MaxUint64 - binary.BigEndian.Uint64(stat.head[:8])) / stat.accounts
+ eta = common.CalculateETA(done, left, time.Since(stat.start))
)
// If there are large contract crawls in progress, estimate their finish time
for acc, head := range stat.slotsHead {
start := stat.slotsStart[acc]
if done := binary.BigEndian.Uint64(head[:8]); done > 0 {
- var (
- left = math.MaxUint64 - binary.BigEndian.Uint64(head[:8])
- speed = done/uint64(time.Since(start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
- )
+ left := math.MaxUint64 - binary.BigEndian.Uint64(head[:8])
+
// Override the ETA if larger than the largest until now
- if slotETA := time.Duration(left/speed) * time.Millisecond; eta < slotETA {
+ if slotETA := common.CalculateETA(done, left, time.Since(start)); eta < slotETA {
eta = slotETA
}
}
diff --git a/version/version.go b/version/version.go
index 7ca8aff250..7fe1ba967d 100644
--- a/version/version.go
+++ b/version/version.go
@@ -25,7 +25,7 @@ import (
const (
Major = 1 // Major version component of the current release
Minor = 16 // Minor version component of the current release
- Patch = 2 // Patch version component of the current release
+ Patch = 3 // Patch version component of the current release
Meta = "stable" // Version metadata to append to the version string
)