diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index da1fb3701f..76f288b472 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -423,7 +423,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
sdb := state.NewDatabase(tdb, nil)
statedb, _ := state.New(types.EmptyRootHash, sdb)
for addr, a := range accounts {
- statedb.SetCode(addr, a.Code)
+ statedb.SetCode(addr, a.Code, tracing.CodeChangeGenesis)
statedb.SetNonce(addr, a.Nonce, tracing.NonceChangeGenesis)
statedb.SetBalance(addr, uint256.MustFromBig(a.Balance), tracing.BalanceIncreaseGenesisBalance)
for k, v := range a.Storage {
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index c6bc5ea333..d5f950b6f9 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -323,7 +323,7 @@ func runCmd(ctx *cli.Context) error {
}
} else {
if len(code) > 0 {
- prestate.SetCode(receiver, code)
+ prestate.SetCode(receiver, code, tracing.CodeChangeUnspecified)
}
execFunc = func() ([]byte, uint64, error) {
// don't mutate the state!
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index e4a71282d5..b93a1f9611 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -148,7 +148,6 @@ var (
utils.MinerRecommitIntervalFlag,
utils.MinerNewPayloadTimeoutFlag, // deprecated
utils.MinerDelayLeftoverFlag,
- utils.EnableBALFlag,
// utils.MinerNewPayloadTimeout,
utils.NATFlag,
utils.NoDiscoverFlag,
@@ -193,6 +192,7 @@ var (
utils.IncrSnapshotKeptBlocksFlag,
utils.UseRemoteIncrSnapshotFlag,
utils.RemoteIncrSnapshotURLFlag,
+ utils.ExperimentalBALFlag,
// utils.BeaconApiFlag,
// utils.BeaconApiHeaderFlag,
// utils.BeaconThresholdFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 76606cd1f0..a854216ec0 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -183,11 +183,6 @@ var (
Usage: "Chapel network: pre-configured Proof-of-Stake-Authority BSC test network",
Category: flags.EthCategory,
}
- EnableBALFlag = &cli.BoolFlag{
- Name: "enablebal",
- Usage: "Enable block access list feature, validator will generate BAL for each block",
- Category: flags.EthCategory,
- }
// Dev mode
DeveloperFlag = &cli.BoolFlag{
Name: "dev",
@@ -1335,6 +1330,14 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
Value: "",
Category: flags.StateCategory,
}
+
+ // Block Access List flags
+
+ ExperimentalBALFlag = &cli.BoolFlag{
+ Name: "experimental.bal",
+ Usage: "Enable block-access-list building when importing post-Cancun blocks, and validation that access lists contained in post-Cancun blocks correctly correspond to the state changes in those blocks. This is used for development purposes only. Do not enable it otherwise.",
+ Category: flags.MiscCategory,
+ }
)
var (
@@ -1809,9 +1812,6 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
if ctx.IsSet(DisableSnapProtocolFlag.Name) {
cfg.DisableSnapProtocol = ctx.Bool(DisableSnapProtocolFlag.Name)
}
- if ctx.IsSet(EnableBALFlag.Name) {
- cfg.EnableBAL = ctx.Bool(EnableBALFlag.Name)
- }
if ctx.IsSet(RangeLimitFlag.Name) {
cfg.RangeLimit = ctx.Bool(RangeLimitFlag.Name)
}
@@ -2111,9 +2111,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.IsSet(CacheNoPrefetchFlag.Name) {
cfg.NoPrefetch = ctx.Bool(CacheNoPrefetchFlag.Name)
}
- if ctx.IsSet(EnableBALFlag.Name) {
- cfg.EnableBAL = ctx.Bool(EnableBALFlag.Name)
- }
// Read the value from the flag no matter if it's set or not.
cfg.Preimages = ctx.Bool(CachePreimagesFlag.Name)
if cfg.NoPruning && !cfg.Preimages {
@@ -2387,6 +2384,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
}
}
+ cfg.ExperimentalBAL = ctx.Bool(ExperimentalBALFlag.Name)
+
// Download and merge incremental snapshot config
if ctx.IsSet(UseRemoteIncrSnapshotFlag.Name) {
cfg.UseRemoteIncrSnapshot = true
@@ -2808,7 +2807,6 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
options := &core.BlockChainConfig{
TrieCleanLimit: ethconfig.Defaults.TrieCleanCache,
NoPrefetch: ctx.Bool(CacheNoPrefetchFlag.Name),
- EnableBAL: ctx.Bool(EnableBALFlag.Name),
TrieDirtyLimit: ethconfig.Defaults.TrieDirtyCache,
ArchiveMode: ctx.String(GCModeFlag.Name) == "archive",
TrieTimeLimit: ethconfig.Defaults.TrieTimeout,
@@ -2863,6 +2861,7 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
}
options.VmConfig = vmcfg
+ options.EnableBAL = ctx.Bool(ExperimentalBALFlag.Name)
chain, err := core.NewBlockChain(chainDb, gspec, engine, options)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)
diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index aaa1111456..04f9add718 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
- "github.com/ethereum/go-ethereum/core/state"
+ state2 "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -405,10 +405,9 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.
// FinalizeAndAssemble implements consensus.Engine, setting the final state and
// assembling the block.
-func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks) (*types.Block, []*types.Receipt, error) {
- // FinalizeAndAssemble is different with Prepare, it can be used in both block generation.
+func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state2.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks, onFinalization func()) (*types.Block, []*types.Receipt, error) {
if !beacon.IsPoSHeader(header) {
- return beacon.ethone.FinalizeAndAssemble(chain, header, state, body, receipts, tracer)
+ return beacon.ethone.FinalizeAndAssemble(chain, header, state, body, receipts, tracer, onFinalization)
}
shanghai := chain.Config().IsShanghai(header.Number, header.Time)
if shanghai {
@@ -427,6 +426,10 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
// Assign the final state root to header.
header.Root = state.IntermediateRoot(true)
+ if onFinalization != nil {
+ onFinalization()
+ }
+
// Assemble the final block.
block := types.NewBlock(header, body, receipts, trie.NewStackTrie(nil))
@@ -491,15 +494,6 @@ func (beacon *Beacon) SealHash(header *types.Header) common.Hash {
return beacon.ethone.SealHash(header)
}
-func (beacon *Beacon) SignBAL(blockAccessList *types.BlockAccessListEncode) error {
- return nil
-}
-
-// VerifyBAL verifies the BAL of the block
-func (beacon *Beacon) VerifyBAL(block *types.Block, bal *types.BlockAccessListEncode) error {
- return nil
-}
-
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
@@ -551,3 +545,11 @@ func IsTTDReached(chain consensus.ChainHeaderReader, parentHash common.Hash, par
}
return td.Cmp(chain.Config().TerminalTotalDifficulty) >= 0, nil
}
+
+func (beacon *Beacon) SignBAL(blockAccessList *types.BlockAccessListEncode) error {
+ return nil
+}
+
+func (beacon *Beacon) VerifyBAL(block *types.Block, blockAccessList *types.BlockAccessListEncode) error {
+ return nil
+}
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 411ddc5fd4..9f0a649531 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -599,7 +599,7 @@ func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
// nor block rewards given, and returns the final block.
-func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks) (*types.Block, []*types.Receipt, error) {
+func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks, onFinalize func()) (*types.Block, []*types.Receipt, error) {
if len(body.Withdrawals) > 0 {
return nil, nil, errors.New("clique does not support withdrawals")
}
@@ -609,6 +609,9 @@ func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
// Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
+ if onFinalize != nil {
+ onFinalize()
+ }
// Assemble and return the final block for sealing.
return types.NewBlock(header, &types.Body{Transactions: body.Transactions}, receipts, trie.NewStackTrie(nil)), receipts, nil
}
@@ -788,10 +791,10 @@ func encodeSigHeader(w io.Writer, header *types.Header) {
}
}
-func (c *Clique) SignBAL(bal *types.BlockAccessListEncode) error {
+func (c *Clique) SignBAL(blockAccessList *types.BlockAccessListEncode) error {
return nil
}
-func (c *Clique) VerifyBAL(block *types.Block, bal *types.BlockAccessListEncode) error {
+func (c *Clique) VerifyBAL(block *types.Block, blockAccessList *types.BlockAccessListEncode) error {
return nil
}
diff --git a/consensus/consensus.go b/consensus/consensus.go
index e5df31b820..cd16b3caf3 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -21,11 +21,12 @@ import (
"math/big"
"time"
+ state2 "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/vm"
+
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
)
@@ -124,7 +125,7 @@ type Engine interface {
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
- FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks) (*types.Block, []*types.Receipt, error)
+ FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state2.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks, onFinalization func()) (*types.Block, []*types.Receipt, error)
// Seal generates a new sealing request for the given input block and pushes
// the result into the given channel.
@@ -136,16 +137,16 @@ type Engine interface {
// SealHash returns the hash of a block prior to it being sealed.
SealHash(header *types.Header) common.Hash
+ // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
+ // that a new block should have.
+ CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int
+
// SignBAL signs the BAL of the block
SignBAL(blockAccessList *types.BlockAccessListEncode) error
// VerifyBAL verifies the BAL of the block
VerifyBAL(block *types.Block, bal *types.BlockAccessListEncode) error
- // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
- // that a new block should have.
- CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int
-
// Delay returns the max duration the miner can commit txs
Delay(chain ChainReader, header *types.Header, leftOver *time.Duration) *time.Duration
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index de28a1d451..6033d92b12 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -523,7 +523,7 @@ func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.
// FinalizeAndAssemble implements consensus.Engine, accumulating the block and
// uncle rewards, setting the final state and assembling the block.
-func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks) (*types.Block, []*types.Receipt, error) {
+func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks, onFinalize func()) (*types.Block, []*types.Receipt, error) {
if len(body.Withdrawals) > 0 {
return nil, nil, errors.New("ethash does not support withdrawals")
}
@@ -533,6 +533,10 @@ func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
// Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
+ if onFinalize != nil {
+ onFinalize()
+ }
+
// Header seems complete, assemble into a block and return
return types.NewBlock(header, &types.Body{Transactions: body.Transactions, Uncles: body.Uncles}, receipts, trie.NewStackTrie(nil)), receipts, nil
}
diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go
index 2742469e6e..548ea192a3 100644
--- a/consensus/parlia/parlia.go
+++ b/consensus/parlia/parlia.go
@@ -1495,10 +1495,85 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade
return nil
}
+func (p *Parlia) SignBAL(blockAccessList *types.BlockAccessListEncode) error {
+ p.lock.RLock()
+ val, signFn := p.val, p.signFn
+ p.lock.RUnlock()
+
+ data, err := rlp.EncodeToBytes([]interface{}{blockAccessList.Version, blockAccessList.Number, blockAccessList.Hash, blockAccessList.AccessList.Hash()})
+ if err != nil {
+ log.Error("Encode to bytes failed when sealing", "err", err)
+ return errors.New("encode to bytes failed")
+ }
+
+ if len(data) > int(params.MaxBALSize) {
+ log.Error("data is too large", "dataSize", len(data), "maxSize", params.MaxBALSize)
+ return errors.New("data is too large")
+ }
+
+ sig, err := signFn(accounts.Account{Address: val}, accounts.MimetypeParlia, data)
+ if err != nil {
+ log.Error("Sign for the block header failed when sealing", "err", err)
+ return errors.New("sign for the block header failed")
+ }
+
+ if blockAccessList.SignData == nil {
+ blockAccessList.SignData = make([]byte, 65)
+ }
+ copy(blockAccessList.SignData, sig)
+ return nil
+}
+
+func (p *Parlia) VerifyBAL(block *types.Block, blockAccessList *types.BlockAccessListEncode) error {
+ if blockAccessList.Version != 1 {
+ log.Error("invalid BAL version", "version", blockAccessList.Version)
+ return errors.New("invalid BAL version")
+ }
+
+ if blockAccessList.AccessList.Hash().Cmp(blockAccessList.Hash) == 0 {
+ // TODO: skip the BAL signature verify temporarily
+ log.Info("skip the BAL signature verify temporarily", "block", block.Number(), "hash", block.Hash())
+ return nil
+ }
+
+ if len(blockAccessList.SignData) != 65 {
+ log.Error("invalid BAL signature", "signatureSize", len(blockAccessList.SignData))
+ return errors.New("invalid BAL signature")
+ }
+
+ // Recover the public key and the Ethereum address
+ data, err := rlp.EncodeToBytes([]interface{}{blockAccessList.Version, blockAccessList.Number, blockAccessList.Hash, blockAccessList.AccessList.Hash()})
+ if err != nil {
+ log.Error("encode to bytes failed", "err", err)
+ return errors.New("encode to bytes failed")
+ }
+
+ if len(data) > int(params.MaxBALSize) {
+ log.Error("data is too large", "dataSize", len(data), "maxSize", params.MaxBALSize)
+ return errors.New("data is too large")
+ }
+
+ pubkey, err := crypto.Ecrecover(crypto.Keccak256(data), blockAccessList.SignData)
+ if err != nil {
+ log.Error("Ecrecover failed", "err", err, "signData", blockAccessList.SignData)
+ return err
+ }
+ var pubkeyAddr common.Address
+ copy(pubkeyAddr[:], crypto.Keccak256(pubkey[1:])[12:])
+
+ signer := block.Header().Coinbase
+ if signer != pubkeyAddr {
+ log.Error("BAL signer mismatch", "signer", signer, "pubkeyAddr", pubkeyAddr, "bal.Number", blockAccessList.Number, "bal.Hash", blockAccessList.Hash, "bal.signdata", common.Bytes2Hex(blockAccessList.SignData))
+ return errors.New("signer mismatch")
+ }
+
+ return nil
+}
+
// FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set,
// nor block rewards given, and returns the final block.
-func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB,
- body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks) (*types.Block, []*types.Receipt, error) {
+func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB,
+ body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks, onFinalize func()) (*types.Block, []*types.Receipt, error) {
// No block rewards in PoA, so the state remains as is and uncles are dropped
cx := chainContext{Chain: chain, parlia: p}
@@ -1508,23 +1583,27 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
if receipts == nil {
receipts = make([]*types.Receipt, 0)
}
+ var workingState vm.StateDB = statedb
+ if tracer != nil {
+ workingState = state.NewHookedState(statedb, tracer)
+ }
parent := chain.GetHeaderByHash(header.ParentHash)
if parent == nil {
return nil, nil, errors.New("parent not found")
}
- systemcontracts.TryUpdateBuildInSystemContract(p.chainConfig, header.Number, parent.Time, header.Time, state, false)
+ systemcontracts.TryUpdateBuildInSystemContract(p.chainConfig, header.Number, parent.Time, header.Time, workingState, false)
if p.chainConfig.IsOnFeynman(header.Number, parent.Time, header.Time) {
- err := p.initializeFeynmanContract(state, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer)
+ err := p.initializeFeynmanContract(workingState, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer)
if err != nil {
log.Error("init feynman contract failed", "error", err)
}
}
if header.Number.Cmp(common.Big1) == 0 {
- err := p.initContract(state, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer)
+ err := p.initContract(workingState, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer)
if err != nil {
log.Error("init contract failed")
}
@@ -1548,7 +1627,7 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
}
}
if !signedRecently {
- err = p.slash(spoiledVal, state, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer)
+ err = p.slash(spoiledVal, workingState, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer)
if err != nil {
// it is possible that slash validator failed because of the slash channel is disabled.
log.Error("slash validator failed", "block hash", header.Hash(), "address", spoiledVal)
@@ -1556,13 +1635,13 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
}
}
- err := p.distributeIncoming(p.val, state, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer)
+ err := p.distributeIncoming(p.val, workingState, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer)
if err != nil {
return nil, nil, err
}
if p.chainConfig.IsPlato(header.Number) {
- if err := p.distributeFinalityReward(chain, state, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer); err != nil {
+ if err := p.distributeFinalityReward(chain, workingState, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer); err != nil {
return nil, nil, err
}
}
@@ -1571,7 +1650,7 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
if p.chainConfig.IsFeynman(header.Number, header.Time) && isBreatheBlock(parent.Time, header.Time) {
// we should avoid update validators in the Feynman upgrade block
if !p.chainConfig.IsOnFeynman(header.Number, parent.Time, header.Time) {
- if err := p.updateValidatorSetV2(state, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer); err != nil {
+ if err := p.updateValidatorSetV2(workingState, header, cx, &body.Transactions, &receipts, nil, &header.GasUsed, true, tracer); err != nil {
return nil, nil, err
}
}
@@ -1582,12 +1661,17 @@ func (p *Parlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *
return nil, nil, errors.New("gas consumption of system txs exceed the gas limit")
}
header.UncleHash = types.EmptyUncleHash
+
+ if onFinalize != nil {
+ onFinalize()
+ }
+
var blk *types.Block
var rootHash common.Hash
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
- rootHash = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
+ rootHash = workingState.IntermediateRoot(chain.Config().IsEIP158(header.Number))
wg.Done()
}()
go func() {
@@ -1787,71 +1871,6 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
return nil
}
-func (p *Parlia) SignBAL(blockAccessList *types.BlockAccessListEncode) error {
- p.lock.RLock()
- val, signFn := p.val, p.signFn
- p.lock.RUnlock()
-
- data, err := rlp.EncodeToBytes([]interface{}{blockAccessList.Version, blockAccessList.Number, blockAccessList.Hash, blockAccessList.Accounts})
- if err != nil {
- log.Error("Encode to bytes failed when sealing", "err", err)
- return errors.New("encode to bytes failed")
- }
-
- if len(data) > int(params.MaxBALSize) {
- log.Error("data is too large", "dataSize", len(data), "maxSize", params.MaxBALSize)
- return errors.New("data is too large")
- }
-
- sig, err := signFn(accounts.Account{Address: val}, accounts.MimetypeParlia, data)
- if err != nil {
- log.Error("Sign for the block header failed when sealing", "err", err)
- return errors.New("sign for the block header failed")
- }
-
- copy(blockAccessList.SignData, sig)
- return nil
-}
-
-func (p *Parlia) VerifyBAL(block *types.Block, bal *types.BlockAccessListEncode) error {
- if bal.Version != 0 {
- log.Error("invalid BAL version", "version", bal.Version)
- return errors.New("invalid BAL version")
- }
-
- if len(bal.SignData) != 65 {
- log.Error("invalid BAL signature", "signatureSize", len(bal.SignData))
- return errors.New("invalid BAL signature")
- }
-
- // Recover the public key and the Ethereum address
- data, err := rlp.EncodeToBytes([]interface{}{bal.Version, block.Number(), block.Hash(), bal.Accounts})
- if err != nil {
- log.Error("encode to bytes failed", "err", err)
- return errors.New("encode to bytes failed")
- }
-
- if len(data) > int(params.MaxBALSize) {
- log.Error("data is too large", "dataSize", len(data), "maxSize", params.MaxBALSize)
- return errors.New("data is too large")
- }
-
- pubkey, err := crypto.Ecrecover(crypto.Keccak256(data), bal.SignData)
- if err != nil {
- return err
- }
- var pubkeyAddr common.Address
- copy(pubkeyAddr[:], crypto.Keccak256(pubkey[1:])[12:])
-
- signer := block.Header().Coinbase
- if signer != pubkeyAddr {
- log.Error("BAL signer mismatch", "signer", signer, "pubkeyAddr", pubkeyAddr, "bal.Number", bal.Number, "bal.Hash", bal.Hash)
- return errors.New("signer mismatch")
- }
-
- return nil
-}
-
func (p *Parlia) shouldWaitForCurrentBlockProcess(chain consensus.ChainHeaderReader, header *types.Header, snap *Snapshot) bool {
if header.Difficulty.Cmp(diffInTurn) == 0 {
return false
@@ -2039,7 +2058,7 @@ func (p *Parlia) distributeIncoming(val common.Address, state vm.StateDB, header
rewards := new(uint256.Int)
rewards = rewards.Rsh(balance, systemRewardPercent)
if rewards.Cmp(common.U2560) > 0 {
- state.SetBalance(consensus.SystemAddress, balance.Sub(balance, rewards), tracing.BalanceChangeUnspecified)
+ state.SubBalance(consensus.SystemAddress, rewards, tracing.BalanceChangeUnspecified)
state.AddBalance(coinbase, rewards, tracing.BalanceChangeUnspecified)
err := p.distributeToSystem(rewards.ToBig(), state, header, chain, txs, receipts, receivedTxs, usedGas, mining, tracer)
if err != nil {
@@ -2054,7 +2073,7 @@ func (p *Parlia) distributeIncoming(val common.Address, state vm.StateDB, header
return nil
}
- state.SetBalance(consensus.SystemAddress, common.U2560, tracing.BalanceDecreaseBSCDistributeReward)
+ state.SubBalance(consensus.SystemAddress, balance, tracing.BalanceDecreaseBSCDistributeReward)
state.AddBalance(coinbase, balance, tracing.BalanceIncreaseBSCDistributeReward)
log.Trace("distribute to validator contract", "block hash", header.Hash(), "amount", balance)
return p.distributeToValidator(balance.ToBig(), val, state, header, chain, txs, receipts, receivedTxs, usedGas, mining, tracer)
@@ -2192,6 +2211,11 @@ func (p *Parlia) applyTransaction(
// move to next
*receivedTxs = (*receivedTxs)[1:]
}
+ if indexer, ok := state.(interface {
+ SetAccessListIndex(int)
+ }); ok {
+ indexer.SetAccessListIndex(len(*txs) + 1)
+ }
state.SetTxContext(expectedTx.Hash(), len(*txs))
// Create a new context to be used in the EVM environment
@@ -2446,9 +2470,6 @@ func (p *Parlia) detectNewVersionWithFork(chain consensus.ChainHeaderReader, hea
forkHashHex := hex.EncodeToString(nextForkHash[:])
if !snap.isMajorityFork(forkHashHex) {
logFn := log.Debug
- if state.NoTries() {
- logFn = log.Warn
- }
logFn("possible fork detected: client is not in majority", "nextForkHash", forkHashHex)
}
}
@@ -2493,8 +2514,6 @@ func applyMessage(
if chainConfig.IsCancun(header.Number, header.Time) {
rules := evm.ChainConfig().Rules(evm.Context.BlockNumber, evm.Context.Random != nil, evm.Context.Time)
state.Prepare(rules, msg.From, evm.Context.Coinbase, msg.To, vm.ActivePrecompiles(rules), msg.AccessList)
- } else {
- state.ClearAccessList()
}
// Increment the nonce for the next transaction
state.SetNonce(msg.From, state.GetNonce(msg.From)+1, tracing.NonceChangeEoACall)
diff --git a/consensus/parlia/parlia_test.go b/consensus/parlia/parlia_test.go
index d726ae6a27..e63aaf3434 100644
--- a/consensus/parlia/parlia_test.go
+++ b/consensus/parlia/parlia_test.go
@@ -10,7 +10,6 @@ import (
"strings"
"testing"
- "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
cmath "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
@@ -619,16 +618,15 @@ func TestSimulateP2P(t *testing.T) {
if err != nil {
t.Fatalf("[Testcase %d] simulate P2P error: %v", index, err)
}
- /*
- for _, val := range c.validators {
- t.Logf("[Testcase %d] validator(%d) head block: %d",
- index, val.index, val.head.blockNumber)
- t.Logf("[Testcase %d] validator(%d) highest justified block: %d",
- index, val.index, val.head.GetJustifiedNumber())
- t.Logf("[Testcase %d] validator(%d) highest finalized block: %d",
- index, val.index, val.head.GetFinalizedBlock().blockNumber)
- }
- */
+ for _, val := range c.validators {
+ t.Logf("[Testcase %d] validator(%d) head block: %d",
+ index, val.index, val.head.blockNumber)
+ t.Logf("[Testcase %d] validator(%d) highest justified block: %d",
+ index, val.index, val.head.GetJustifiedNumber())
+ t.Logf("[Testcase %d] validator(%d) highest finalized block: %d",
+ index, val.index, val.head.GetFinalizedBlock().blockNumber)
+ }
+
if c.CheckChain() == false {
t.Fatalf("[Testcase %d] chain not works as expected", index)
}
@@ -846,13 +844,17 @@ func (c *mockParlia) Finalize(chain consensus.ChainHeaderReader, header *types.H
return
}
-func (c *mockParlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks) (*types.Block, []*types.Receipt, error) {
+func (c *mockParlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks, onFinalize func()) (*types.Block, []*types.Receipt, error) {
// Finalize block
c.Finalize(chain, header, state, &body.Transactions, body.Uncles, body.Withdrawals, nil, nil, nil, tracer)
// Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
+ if onFinalize != nil {
+ onFinalize()
+ }
+
// Header seems complete, assemble into a block and return
return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), receipts, nil
}
@@ -861,489 +863,10 @@ func (c *mockParlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint
return big.NewInt(1)
}
-func TestSignBAL(t *testing.T) {
- // Setup test environment
- key, _ := crypto.GenerateKey()
- addr := crypto.PubkeyToAddress(key.PublicKey)
-
- // Create mock signing function that succeeds
- mockSignFn := func(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
- if account.Address != addr {
- return nil, fmt.Errorf("wrong address")
- }
- if mimeType != accounts.MimetypeParlia {
- return nil, fmt.Errorf("wrong mime type")
- }
- // Return a dummy 65-byte signature
- sig := make([]byte, 65)
- copy(sig, []byte("test_signature_data_for_testing_purposes_123456789012345678901234"))
- return sig, nil
- }
-
- // Create Parlia instance
- parlia := &Parlia{
- val: addr,
- signFn: mockSignFn,
- }
-
- tests := []struct {
- name string
- bal *types.BlockAccessListEncode
- expectedError bool
- signFn SignerFn
- description string
- }{
- {
- name: "successful signing",
- bal: &types.BlockAccessListEncode{
- Version: 0,
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{
- {
- Address: common.HexToAddress("0x1234567890123456789012345678901234567890"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- },
- },
- },
- },
- expectedError: false,
- signFn: mockSignFn,
- description: "Should successfully sign a valid BlockAccessListEncode",
- },
- {
- name: "signing function error",
- bal: &types.BlockAccessListEncode{
- Version: 0,
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{},
- },
- expectedError: true,
- signFn: func(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
- return nil, fmt.Errorf("signing failed")
- },
- description: "Should return error when signing function fails",
- },
- {
- name: "empty accounts list",
- bal: &types.BlockAccessListEncode{
- Version: 0,
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{},
- },
- expectedError: false,
- signFn: mockSignFn,
- description: "Should successfully sign even with empty accounts list",
- },
- {
- name: "multiple accounts",
- bal: &types.BlockAccessListEncode{
- Version: 2,
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{
- {
- Address: common.HexToAddress("0x1111111111111111111111111111111111111111"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- {Key: common.HexToHash("0x02"), TxIndex: 1, Dirty: true},
- },
- },
- {
- Address: common.HexToAddress("0x2222222222222222222222222222222222222222"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x03"), TxIndex: 2, Dirty: false},
- },
- },
- },
- },
- expectedError: false,
- signFn: mockSignFn,
- description: "Should successfully sign with multiple accounts",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- // Set up Parlia with the test signing function
- parlia.signFn = tt.signFn
-
- // Call SignBAL
- err := parlia.SignBAL(tt.bal)
-
- // Check results
- if tt.expectedError {
- if err == nil {
- t.Errorf("Expected error but got none. %s", tt.description)
- }
- } else {
- if err != nil {
- t.Errorf("Expected no error but got: %v. %s", err, tt.description)
- }
- // Verify signature was copied to SignData
- if tt.bal != nil && len(tt.bal.SignData) != 65 {
- t.Errorf("Expected SignData to be 65 bytes, got %d", len(tt.bal.SignData))
- }
- // Verify signature content (for successful cases)
- if tt.bal != nil && !tt.expectedError {
- expectedSig := "test_signature_data_for_testing_purposes_123456789012345678901234"
- if string(tt.bal.SignData[:len(expectedSig)]) != expectedSig {
- t.Errorf("SignData was not properly set")
- }
- }
- }
- })
- }
-}
-
-func TestVerifyBAL(t *testing.T) {
- // Setup test environment
- signerKey, _ := crypto.GenerateKey()
- signerAddr := crypto.PubkeyToAddress(signerKey.PublicKey)
-
- // Helper function to create a properly signed BAL
- createBlockWithBAL := func(addr common.Address, version uint32, signLength int, accounts []types.AccountAccessListEncode) *types.Block {
- header := &types.Header{
- ParentHash: types.EmptyRootHash,
- Number: big.NewInt(10),
- Coinbase: addr,
- }
- block := types.NewBlock(header, nil, nil, nil)
- bal := &types.BlockAccessListEncode{
- Version: version,
- Number: block.Number().Uint64(),
- Hash: block.Hash(),
- SignData: make([]byte, signLength),
- Accounts: accounts,
- }
-
- // RLP encode the data
- data, _ := rlp.EncodeToBytes([]interface{}{bal.Version, bal.Number, bal.Hash, bal.Accounts})
-
- // Create signature using the test key
- hash := crypto.Keccak256(data)
- sig, _ := crypto.Sign(hash, signerKey)
- copy(bal.SignData, sig)
- block = block.WithBAL(bal)
- return block
- }
-
- // Create a Parlia instance
- parlia := &Parlia{}
-
- tests := []struct {
- name string
- block *types.Block
- expectedError bool
- description string
- }{
- {
- name: "valid signature verification",
- block: createBlockWithBAL(signerAddr, 0, 65, []types.AccountAccessListEncode{
- {
- Address: common.HexToAddress("0x1234567890123456789012345678901234567890"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- },
- },
- }),
- expectedError: false,
- description: "Should successfully verify a properly signed BAL",
- },
- {
- name: "invalid version",
- block: createBlockWithBAL(signerAddr, 1, 65, []types.AccountAccessListEncode{
- {
- Address: common.HexToAddress("0x1234567890123456789012345678901234567890"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- },
- },
- }),
- expectedError: true,
- description: "Should fail when version is invalid",
- },
- {
- name: "invalid signature length - too short",
- block: createBlockWithBAL(signerAddr, 0, 64, []types.AccountAccessListEncode{}),
- expectedError: true,
- description: "Should fail when signature is too short",
- },
- {
- name: "invalid signature length - too long",
- block: createBlockWithBAL(signerAddr, 0, 66, []types.AccountAccessListEncode{}),
- expectedError: true,
- description: "Should fail when signature is too long",
- },
- {
- name: "empty signature",
- block: createBlockWithBAL(signerAddr, 0, 0, []types.AccountAccessListEncode{}),
- expectedError: true,
- description: "Should fail with empty signature",
- },
- {
- name: "signer mismatch",
- block: createBlockWithBAL(common.HexToAddress("0x1234567890123456789012345678901234567890"), 0, 65, []types.AccountAccessListEncode{
- {
- Address: common.HexToAddress("0x1234567890123456789012345678901234567890"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- },
- },
- }),
- expectedError: true,
- description: "Should fail when signer address doesn't match recovered address",
- },
- {
- name: "empty accounts list",
- block: createBlockWithBAL(signerAddr, 0, 65, []types.AccountAccessListEncode{}),
- expectedError: false,
- description: "Should successfully verify BAL with empty accounts",
- },
- {
- name: "multiple accounts",
- block: createBlockWithBAL(signerAddr, 0, 65, []types.AccountAccessListEncode{
- {
- Address: common.HexToAddress("0x1111111111111111111111111111111111111111"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- {Key: common.HexToHash("0x02"), TxIndex: 1, Dirty: true},
- },
- },
- {
- Address: common.HexToAddress("0x2222222222222222222222222222222222222222"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x03"), TxIndex: 2, Dirty: false},
- },
- },
- }),
- expectedError: false,
- description: "Should successfully verify BAL with multiple accounts",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- err := parlia.VerifyBAL(tt.block, tt.block.BAL())
- if tt.expectedError {
- if err == nil {
- t.Errorf("Expected error but got none. %s", tt.description)
- }
- } else {
- if err != nil {
- t.Errorf("Expected no error but got: %v. %s", err, tt.description)
- }
- }
- })
- }
-}
-
-func TestVerifyBAL_EdgeCases(t *testing.T) {
- // Test with different key to ensure proper signature verification
- key1, _ := crypto.GenerateKey()
- key2, _ := crypto.GenerateKey()
- addr1 := crypto.PubkeyToAddress(key1.PublicKey)
- addr2 := crypto.PubkeyToAddress(key2.PublicKey)
-
- parlia := &Parlia{}
-
- header1 := &types.Header{
- ParentHash: types.EmptyRootHash,
- Number: big.NewInt(10),
- Coinbase: addr1,
- }
- block1 := types.NewBlock(header1, nil, nil, nil)
- // Create BAL signed with key1
- bal := &types.BlockAccessListEncode{
- Version: 0,
- Number: block1.Number().Uint64(),
- Hash: block1.Hash(),
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{
- {
- Address: common.HexToAddress("0x1234567890123456789012345678901234567890"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- },
- },
- },
- }
-
- // Sign with key1
- data, _ := rlp.EncodeToBytes([]interface{}{bal.Version, bal.Number, bal.Hash, bal.Accounts})
- hash := crypto.Keccak256(data)
- sig, _ := crypto.Sign(hash, key1)
- copy(bal.SignData, sig)
-
- // Should succeed with addr1
- err := parlia.VerifyBAL(block1, bal)
- if err != nil {
- t.Errorf("Verification with correct signer failed: %v", err)
- }
-
- // Should fail with addr2 (different key)
- header2 := &types.Header{
- ParentHash: types.EmptyRootHash,
- Number: big.NewInt(10),
- Coinbase: addr2,
- }
- block2 := types.NewBlock(header2, nil, nil, nil)
- err = parlia.VerifyBAL(block2, bal)
- if err == nil {
- t.Error("Expected verification to fail with different signer address")
- }
-}
-
-func TestVerifyBAL_TooLargeData(t *testing.T) {
- // Test with large amount of data to ensure RLP encoding works correctly
- key, _ := crypto.GenerateKey()
- addr := crypto.PubkeyToAddress(key.PublicKey)
- parlia := &Parlia{}
-
- // Create BAL with many accounts
- accounts := make([]types.AccountAccessListEncode, 20000)
- for i := 0; i < 20000; i++ {
- accounts[i] = types.AccountAccessListEncode{
- Address: common.BigToAddress(big.NewInt(int64(i))),
- StorageItems: []types.StorageAccessItem{
- {Key: common.BigToHash(big.NewInt(int64(i))), TxIndex: uint32(i), Dirty: i%2 == 0},
- {Key: common.BigToHash(big.NewInt(int64(i + 1000))), TxIndex: uint32(i + 1), Dirty: i%3 == 0},
- },
- }
- }
-
- header := &types.Header{
- ParentHash: types.EmptyRootHash,
- Number: big.NewInt(10),
- Coinbase: addr,
- }
- block := types.NewBlock(header, nil, nil, nil)
- bal := &types.BlockAccessListEncode{
- Version: 0,
- Number: block.Number().Uint64(),
- Hash: block.Hash(),
- SignData: make([]byte, 65),
- Accounts: accounts,
- }
-
- // Sign the large data
- data, err := rlp.EncodeToBytes([]interface{}{bal.Version, bal.Number, bal.Hash, bal.Accounts})
- if err != nil {
- t.Fatalf("Failed to RLP encode large data: %v", err)
- }
-
- hash := crypto.Keccak256(data)
- sig, err := crypto.Sign(hash, key)
- if err != nil {
- t.Fatalf("Failed to sign large data: %v", err)
- }
- copy(bal.SignData, sig)
-
- // Verify the signature
- err = parlia.VerifyBAL(block, bal)
- if err.Error() != "data is too large" {
- t.Errorf("Failed to verify BAL with large data: %v", err)
- }
+func (c *mockParlia) SignBAL(blockAccessList *types.BlockAccessListEncode) error {
+ return nil
}
-func TestSignBAL_VerifyBAL_Integration(t *testing.T) {
- // Test complete sign-verify cycle
- key, _ := crypto.GenerateKey()
- addr := crypto.PubkeyToAddress(key.PublicKey)
-
- // Create mock signing function
- mockSignFn := func(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
- if account.Address != addr {
- return nil, fmt.Errorf("wrong address")
- }
- if mimeType != accounts.MimetypeParlia {
- return nil, fmt.Errorf("wrong mime type")
- }
- // Use the actual private key to sign
- hash := crypto.Keccak256(data)
- return crypto.Sign(hash, key)
- }
-
- parlia := &Parlia{
- val: addr,
- signFn: mockSignFn,
- }
-
- testCases := []struct {
- name string
- version uint32
- accounts []types.AccountAccessListEncode
- }{
- {
- name: "empty accounts",
- version: 0,
- accounts: []types.AccountAccessListEncode{},
- },
- {
- name: "single account",
- version: 0,
- accounts: []types.AccountAccessListEncode{
- {
- Address: common.HexToAddress("0x1234567890123456789012345678901234567890"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- {Key: common.HexToHash("0x02"), TxIndex: 1, Dirty: true},
- },
- },
- },
- },
- {
- name: "multiple accounts",
- version: 0,
- accounts: []types.AccountAccessListEncode{
- {
- Address: common.HexToAddress("0x1111111111111111111111111111111111111111"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- },
- },
- {
- Address: common.HexToAddress("0x2222222222222222222222222222222222222222"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x02"), TxIndex: 1, Dirty: true},
- {Key: common.HexToHash("0x03"), TxIndex: 2, Dirty: false},
- },
- },
- },
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- header := &types.Header{
- ParentHash: types.EmptyRootHash,
- Number: big.NewInt(10),
- Coinbase: addr,
- }
- block := types.NewBlock(header, nil, nil, nil)
- // Create BAL
- bal := &types.BlockAccessListEncode{
- Version: tc.version,
- Number: block.Number().Uint64(),
- Hash: block.Hash(),
- SignData: make([]byte, 65),
- Accounts: tc.accounts,
- }
-
- // Sign the BAL
- err := parlia.SignBAL(bal)
- if err != nil {
- t.Fatalf("SignBAL failed: %v", err)
- }
-
- // Verify signature length
- if len(bal.SignData) != 65 {
- t.Errorf("Expected SignData to be 65 bytes, got %d", len(bal.SignData))
- }
-
- // Verify the BAL with correct signer
- err = parlia.VerifyBAL(block, bal)
- if err != nil {
- t.Errorf("VerifyBAL failed with correct signer: %v", err)
- }
- })
- }
+func (c *mockParlia) VerifyBAL(block *types.Block, blockAccessList *types.BlockAccessListEncode) error {
+ return nil
}
diff --git a/core/block_access_list_tracer.go b/core/block_access_list_tracer.go
new file mode 100644
index 0000000000..54648769c4
--- /dev/null
+++ b/core/block_access_list_tracer.go
@@ -0,0 +1,113 @@
+package core
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/tracing"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/types/bal"
+ "github.com/holiman/uint256"
+)
+
+// BlockAccessListTracer is a tracer which gathers state accesses/mutations
+// from the execution of a block. It is used for constructing and verifying
+// EIP-7928 block access lists.
+type BlockAccessListTracer struct {
+ builder *bal.AccessListBuilder
+
+ // the access list index that changes are currently being recorded into
+ balIdx uint16
+}
+
+// NewBlockAccessListTracer returns an BlockAccessListTracer and a set of hooks
+func NewBlockAccessListTracer() (*BlockAccessListTracer, *tracing.Hooks) {
+ balTracer := &BlockAccessListTracer{
+ builder: bal.NewAccessListBuilder(),
+ }
+ hooks := &tracing.Hooks{
+ OnBlockFinalization: balTracer.OnBlockFinalization,
+ OnPreTxExecutionDone: balTracer.OnPreTxExecutionDone,
+ OnTxEnd: balTracer.TxEndHook,
+ OnEnter: balTracer.OnEnter,
+ OnExit: balTracer.OnExit,
+ OnCodeChangeV2: balTracer.OnCodeChange,
+ OnBalanceChange: balTracer.OnBalanceChange,
+ OnNonceChangeV2: balTracer.OnNonceChange,
+ OnStorageChange: balTracer.OnStorageChange,
+ OnStorageRead: balTracer.OnStorageRead,
+ OnAccountRead: balTracer.OnAcountRead,
+ OnSelfDestructChange: balTracer.OnSelfDestruct,
+ }
+ wrappedHooks, _ := tracing.WrapWithJournal(hooks)
+ return balTracer, wrappedHooks
+}
+
+// AccessList returns the constructed access list.
+// It is assumed that this is only called after all the block state changes
+// have been executed and the block has been finalized.
+func (a *BlockAccessListTracer) AccessList() *bal.AccessListBuilder {
+ return a.builder
+}
+
+func (a *BlockAccessListTracer) AccessListEncoded(number uint64, hash common.Hash) *types.BlockAccessListEncode {
+ return &types.BlockAccessListEncode{
+ Version: 1,
+ Number: number,
+ Hash: hash,
+ SignData: make([]byte, 65),
+ AccessList: a.AccessList().ToEncodingObj(),
+ }
+}
+
+func (a *BlockAccessListTracer) OnPreTxExecutionDone() {
+ a.builder.FinaliseIdxChanges(0)
+ a.balIdx++
+}
+
+func (a *BlockAccessListTracer) TxEndHook(receipt *types.Receipt, err error) {
+ a.builder.FinaliseIdxChanges(a.balIdx)
+ a.balIdx++
+}
+
+func (a *BlockAccessListTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+ a.builder.EnterScope()
+}
+
+func (a *BlockAccessListTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
+ a.builder.ExitScope(reverted)
+}
+
+func (a *BlockAccessListTracer) OnCodeChange(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason tracing.CodeChangeReason) {
+ a.builder.CodeChange(addr, prevCode, code)
+}
+
+func (a *BlockAccessListTracer) OnSelfDestruct(addr common.Address) {
+ a.builder.SelfDestruct(addr)
+}
+
+func (a *BlockAccessListTracer) OnBlockFinalization() {
+ a.builder.FinaliseIdxChanges(a.balIdx)
+}
+
+func (a *BlockAccessListTracer) OnBalanceChange(addr common.Address, prevBalance, newBalance *big.Int, _ tracing.BalanceChangeReason) {
+ newU256 := new(uint256.Int).SetBytes(newBalance.Bytes())
+ prevU256 := new(uint256.Int).SetBytes(prevBalance.Bytes())
+ a.builder.BalanceChange(addr, prevU256, newU256)
+}
+
+func (a *BlockAccessListTracer) OnNonceChange(addr common.Address, prev uint64, new uint64, reason tracing.NonceChangeReason) {
+ a.builder.NonceChange(addr, prev, new)
+}
+
+func (a *BlockAccessListTracer) OnStorageRead(addr common.Address, key common.Hash) {
+ a.builder.StorageRead(addr, key)
+}
+
+func (a *BlockAccessListTracer) OnAcountRead(addr common.Address) {
+ a.builder.AccountRead(addr)
+}
+
+func (a *BlockAccessListTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) {
+ a.builder.StorageWrite(addr, slot, prev, new)
+}
diff --git a/core/block_validator.go b/core/block_validator.go
index b50f6c7173..0b6a54a21f 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -110,6 +110,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
return errors.New("data blobs present in block body")
}
}
+
return nil
},
func() error {
diff --git a/core/blockchain.go b/core/blockchain.go
index 5bc0ab3b4e..ef4fbafce2 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -46,6 +46,7 @@ import (
"github.com/ethereum/go-ethereum/core/systemcontracts"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@@ -110,6 +111,13 @@ var (
blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
+ // BAL-specific timers
+ blockPreprocessingTimer = metrics.NewRegisteredResettingTimer("chain/preprocess", nil)
+ blockPrestateLoadTimer = metrics.NewRegisteredResettingTimer("chain/prestateload", nil)
+ txExecutionTimer = metrics.NewRegisteredResettingTimer("chain/txexecution", nil)
+ stateRootCalctimer = metrics.NewRegisteredResettingTimer("chain/rootcalculation", nil)
+ blockPostprocessingTimer = metrics.NewRegisteredResettingTimer("chain/postprocess", nil)
+
blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
blockReorgDropMeter = metrics.NewRegisteredMeter("chain/reorg/drop", nil)
@@ -223,7 +231,7 @@ type BlockChainConfig struct {
// If the value is -1, indexing is disabled.
TxLookupLimit int64
- // EnableBAL enables the block access list feature
+ // EnableBAL enables block access list creation and verification for post-Cancun blocks which contain access lists.
EnableBAL bool
}
@@ -397,12 +405,13 @@ type BlockChain struct {
stopping atomic.Bool // false if chain is running, true when stopped
procInterrupt atomic.Bool // interrupt signaler for block processing
- engine consensus.Engine
- prefetcher Prefetcher
- validator Validator // Block and state validator interface
- processor Processor // Block transaction processor interface
- forker *ForkChoice
- logger *tracing.Hooks
+ engine consensus.Engine
+ prefetcher Prefetcher
+ validator Validator // Block and state validator interface
+ processor Processor // Block transaction processor interface
+ parallelProcessor ParallelStateProcessor
+ forker *ForkChoice
+ logger *tracing.Hooks
lastForkReadyAlert time.Time // Last time there was a fork readiness print out
@@ -410,6 +419,16 @@ type BlockChain struct {
doubleSignMonitor *monitor.DoubleSignMonitor
}
+// SignBAL implements consensus.ChainHeaderReader.
+func (bc *BlockChain) SignBAL(blockAccessList *bal.BlockAccessList) error {
+ panic("unimplemented")
+}
+
+// VerifyBAL implements consensus.ChainHeaderReader.
+func (bc *BlockChain) VerifyBAL(block *types.Block, bal *bal.BlockAccessList) error {
+ panic("unimplemented")
+}
+
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator
// and Processor.
@@ -499,6 +518,7 @@ func NewBlockChain(db ethdb.Database, genesis *Genesis, engine consensus.Engine,
bc.validator = NewBlockValidator(chainConfig, bc)
bc.prefetcher = NewStatePrefetcher(chainConfig, bc.hc)
bc.processor = NewStateProcessor(chainConfig, bc.hc)
+ bc.parallelProcessor = NewParallelStateProcessor(chainConfig, bc.hc, bc.GetVMConfig())
genesisHeader := bc.GetHeaderByNumber(0)
if genesisHeader == nil {
@@ -1257,7 +1277,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha
rawdb.DeleteReceipts(db, hash, num)
rawdb.DeleteTd(db, hash, num)
rawdb.DeleteBlobSidecars(db, hash, num)
- rawdb.DeleteBAL(db, hash, num)
}
// Todo(rjl493456442) txlookup, log index, etc
}
@@ -1771,9 +1790,10 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
rawdb.WriteRawReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
if bc.chainConfig.IsCancun(block.Number(), block.Time()) {
rawdb.WriteBlobSidecars(batch, block.Hash(), block.NumberU64(), block.Sidecars())
+ if bc.chainConfig.EnableBAL && block.AccessList() != nil {
+ rawdb.WriteBlockAccessList(batch, block.Hash(), block.NumberU64(), block.AccessList())
+ }
}
- rawdb.WriteBAL(batch, block.Hash(), block.NumberU64(), block.BAL())
-
// Write everything belongs to the blocks into the database. So that
// we can ensure all components of body is completed(body, receipts)
// except transaction indexes(will be created once sync is finished).
@@ -1851,7 +1871,7 @@ func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (e
if bc.chainConfig.IsCancun(block.Number(), block.Time()) {
rawdb.WriteBlobSidecars(blockBatch, block.Hash(), block.NumberU64(), block.Sidecars())
}
- rawdb.WriteBAL(blockBatch, block.Hash(), block.NumberU64(), block.BAL())
+ rawdb.WriteBlockAccessList(blockBatch, block.Hash(), block.NumberU64(), block.AccessList())
if err := blockBatch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err)
}
@@ -1898,7 +1918,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
if bc.chainConfig.IsCancun(block.Number(), block.Time()) {
rawdb.WriteBlobSidecars(blockBatch, block.Hash(), block.NumberU64(), block.Sidecars())
}
- rawdb.WriteBAL(blockBatch, block.Hash(), block.NumberU64(), block.BAL())
+ rawdb.WriteBlockAccessList(blockBatch, block.Hash(), block.NumberU64(), block.AccessList())
if bc.db.HasSeparateStateStore() {
rawdb.WritePreimages(bc.db.GetStateStore(), statedb.Preimages())
} else {
@@ -2109,6 +2129,7 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// Do a sanity check that the provided chain is actually ordered and linked.
for i := 1; i < len(chain); i++ {
+ log.Debug("Inserting block", "hash", chain[i].Hash(), "number", chain[i].Number(), "difficulty", chain[i].Difficulty)
block, prev := chain[i], chain[i-1]
if block.NumberU64() != prev.NumberU64()+1 || block.ParentHash() != prev.Hash() {
log.Error("Non contiguous block insert",
@@ -2348,7 +2369,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
bc.updateHighestVerifiedHeader(block.Header())
// The traced section of block import.
start := time.Now()
- res, err := bc.processBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1)
+ // construct or verify block access lists if BALs are enabled and
+ // we are post-selfdestruct removal fork.
+ enableBAL := bc.cfg.EnableBAL
+ blockHasAccessList := block.AccessList() != nil
+ makeBAL := enableBAL && !blockHasAccessList
+ validateBAL := enableBAL && blockHasAccessList
+
+ res, err := bc.ProcessBlock(parent.Root, block, setHead, makeWitness && len(chain) == 1, makeBAL, validateBAL)
if err != nil {
return nil, it.index, err
}
@@ -2449,7 +2477,7 @@ type blockProcessingResult struct {
// processBlock executes and validates the given block. If there was no error
// it writes the block and associated state to database.
-func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool) (_ *blockProcessingResult, blockEndErr error) {
+func (bc *BlockChain) ProcessBlock(parentRoot common.Hash, block *types.Block, setHead bool, makeWitness bool, constructBALForTesting bool, validateBAL bool) (_ *blockProcessingResult, blockEndErr error) {
var (
err error
startTime = time.Now()
@@ -2459,9 +2487,10 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
defer interrupt.Store(true) // terminate the prefetch at the end
needBadSharedStorage := bc.chainConfig.NeedBadSharedStorage(block.Number())
- needPrefetch := needBadSharedStorage || (!bc.cfg.NoPrefetch && len(block.Transactions()) >= prefetchTxNumber) || block.BAL() != nil
+ needPrefetch := needBadSharedStorage || (!bc.cfg.NoPrefetch && len(block.Transactions()) >= prefetchTxNumber)
if !needPrefetch {
statedb, err = state.New(parentRoot, bc.statedb)
+
if err != nil {
return nil, err
}
@@ -2497,15 +2526,12 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
storageCacheMissMeter.Mark(stats.StorageMiss)
}()
- interruptChan := make(chan struct{})
- defer close(interruptChan)
go func(start time.Time, throwaway *state.StateDB, block *types.Block) {
// Disable tracing for prefetcher executions.
vmCfg := bc.cfg.VmConfig
vmCfg.Tracer = nil
- if block.BAL() != nil {
- bc.prefetcher.PrefetchBAL(block, throwaway, interruptChan)
- } else {
+
+ if block.AccessList() == nil {
bc.prefetcher.Prefetch(block.Transactions(), block.Header(), block.GasLimit(), throwaway, vmCfg, &interrupt)
}
@@ -2530,8 +2556,15 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
return nil, err
}
}
- statedb.StartPrefetcher("chain", witness)
- defer statedb.StopPrefetcher()
+
+ // access-list containing blocks don't use the prefetcher because
+ // state root computation proceeds concurrently with transaction
+ // execution, meaning the prefetcher doesn't have any time to run
+ // before the trie nodes are needed for state root computation.
+ if block.AccessList() == nil {
+ statedb.StartPrefetcher("chain", witness)
+ defer statedb.StopPrefetcher()
+ }
}
if bc.logger != nil && bc.logger.OnBlockStart != nil {
@@ -2549,25 +2582,104 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
}()
}
- // Process block using the parent state as reference point
- pstart := time.Now()
- statedb.SetExpectedStateRoot(block.Root())
- statedb.SetNeedBadSharedStorage(needBadSharedStorage)
- res, err := bc.processor.Process(block, statedb, bc.cfg.VmConfig)
- if err != nil {
- bc.reportBlock(block, res, err)
- return nil, err
+ blockHadBAL := block.AccessList() != nil
+ parallelMetrics := blockHadBAL
+ var res *ProcessResult
+ var resWithMetrics *ProcessResultWithMetrics
+ var ptime, vtime time.Duration
+
+ runSequential := func(state *state.StateDB, logMsg string) (*ProcessResult, time.Duration, time.Duration, error) {
+ log.Info(logMsg, "block", block.Number(), "hash", block.Hash())
+ state.SetExpectedStateRoot(block.Root())
+ state.SetNeedBadSharedStorage(needBadSharedStorage)
+ var balTracer *BlockAccessListTracer
+ // Process block using the parent state as reference point
+ if constructBALForTesting {
+ balTracer, bc.cfg.VmConfig.Tracer = NewBlockAccessListTracer()
+ defer func() {
+ bc.cfg.VmConfig.Tracer = nil
+ }()
+ }
+
+ pstart := time.Now()
+ result, err := bc.processor.Process(block, state, bc.cfg.VmConfig)
+ if err != nil {
+ return result, time.Since(pstart), 0, err
+ }
+ ptime := time.Since(pstart)
+
+ // TODO: if I remove this check before executing balTracer.Finalise, the following test fails:
+ // ExecutionSpecBlocktests/shanghai/eip3855_push0/push0/push0_storage_overwrite.json
+ if constructBALForTesting {
+ balTracer.OnBlockFinalization()
+ }
+
+ vstart := time.Now()
+ if err := bc.validator.ValidateState(block, statedb, result, false); err != nil {
+ return result, ptime, time.Since(vstart), err
+ }
+ vtime := time.Since(vstart)
+
+ if constructBALForTesting {
+ // very ugly... deep-copy the block body before setting the block access
+ // list on it to prevent mutating the block instance passed by the caller.
+ block = block.WithAccessList(balTracer.AccessListEncoded(block.NumberU64(), block.Hash()))
+ }
+ return result, ptime, vtime, nil
}
- ptime := time.Since(pstart)
- // Validate the state using the default validator
- vstart := time.Now()
- if err := bc.validator.ValidateState(block, statedb, res, false); err != nil {
- bc.reportBlock(block, res, err)
- return nil, err
+ if block.AccessList() != nil {
+ if block.NumberU64() == 0 {
+ return nil, fmt.Errorf("genesis block cannot have a block access list")
+ }
+ // TODO: rename 'validateBAL' to indicate that it's for validating that the BAL
+ // is present and we are after amsterdam fork. validateBAL=false is only used for
+ // testing BALs in pre-Amsterdam blocks.
+ // Process block using the parent state as reference point
+ pstart := time.Now()
+ statedb.SetExpectedStateRoot(block.Root())
+ statedb.SetNeedBadSharedStorage(needBadSharedStorage)
+ log.Info("Processing block with BAL", "number", block.Number(), "hash", block.Hash())
+ resWithMetrics, err = bc.parallelProcessor.Process(block, statedb, bc.cfg.VmConfig)
+ if err != nil {
+ log.Warn("parallel BAL processing failed, falling back to sequential", "block", block.Number(), "hash", block.Hash(), "err", err)
+ // Reload a fresh statedb for sequential fallback since the previous one might be mutated.
+ fallbackState, fallbackErr := state.New(parentRoot, bc.statedb)
+ if fallbackErr != nil {
+ return nil, fallbackErr
+ }
+ statedb = fallbackState
+ res, ptime, vtime, err = runSequential(statedb, "sequential fallback processing")
+ if err != nil {
+ bc.reportBlock(block, res, err)
+ return nil, err
+ }
+ parallelMetrics = false
+ goto sequentialDone
+ }
+ ptime = time.Since(pstart)
+
+ vstart := time.Now()
+ var err error
+ err = bc.validator.ValidateState(block, statedb, resWithMetrics.ProcessResult, false)
+ if err != nil {
+ // TODO: okay to pass nil here as execution result?
+ bc.reportBlock(block, nil, err)
+ return nil, err
+ }
+ res = resWithMetrics.ProcessResult
+ vtime = time.Since(vstart)
+ } else {
+ parallelMetrics = false
+ var seqErr error
+ res, ptime, vtime, seqErr = runSequential(statedb, "process block")
+ if seqErr != nil {
+ bc.reportBlock(block, res, seqErr)
+ return nil, seqErr
+ }
}
- vtime := time.Since(vstart)
+sequentialDone:
// If witnesses was generated and stateless self-validation requested, do
// that now. Self validation should *never* run in production, it's more of
// a tight integration to enable running *all* consensus tests through the
@@ -2596,29 +2708,42 @@ func (bc *BlockChain) processBlock(parentRoot common.Hash, block *types.Block, s
return nil, fmt.Errorf("stateless self-validation receipt root mismatch (cross: %x local: %x)", crossReceiptRoot, block.ReceiptHash())
}
}
- xvtime := time.Since(xvstart)
- proctime := time.Since(startTime) // processing + validation + cross validation
+ var proctime time.Duration
+ if parallelMetrics {
+ blockPreprocessingTimer.Update(resWithMetrics.PreProcessTime)
+ blockPrestateLoadTimer.Update(resWithMetrics.PrestateLoadTime)
+ txExecutionTimer.Update(resWithMetrics.ExecTime)
+ stateRootCalctimer.Update(resWithMetrics.RootCalcTime)
+ blockPostprocessingTimer.Update(resWithMetrics.PostProcessTime)
- // Update the metrics touched during block processing and validation
- if metrics.EnabledExpensive() {
- accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
- storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
- if statedb.AccountLoaded != 0 {
- accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded))
- }
- if statedb.StorageLoaded != 0 {
- storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded))
+ blockExecutionTimer.Update(ptime) // The time spent on EVM processing
+ blockValidationTimer.Update(vtime)
+
+ accountHashTimer.Update(statedb.AccountHashes)
+ } else {
+ xvtime := time.Since(xvstart)
+ proctime = time.Since(startTime) // processing + validation + cross validation
+
+ // Update the metrics touched during block processing and validation
+ if metrics.EnabledExpensive() {
+ accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
+ storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
+ if statedb.AccountLoaded != 0 {
+ accountReadSingleTimer.Update(statedb.AccountReads / time.Duration(statedb.AccountLoaded))
+ }
+ if statedb.StorageLoaded != 0 {
+ storageReadSingleTimer.Update(statedb.StorageReads / time.Duration(statedb.StorageLoaded))
+ }
+ accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
+ storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
+ accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
}
- accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
- storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
- accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
+ triehash := statedb.AccountHashes // The time spent on tries hashing
+ trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
+ blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing
+ blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
+ blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation
}
- triehash := statedb.AccountHashes // The time spent on tries hashing
- trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
- blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing
- blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
- blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation
-
// Write the block to the chain and get the status.
var (
wstart = time.Now()
diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go
index 1c9ba3e991..cf5a4977ce 100644
--- a/core/blockchain_insert.go
+++ b/core/blockchain_insert.go
@@ -62,7 +62,7 @@ func (st *insertStats) report(chain []*types.Block, index int, snapDiffItems, sn
context := []interface{}{
"number", end.Number(), "hash", end.Hash(), "miner", end.Coinbase(),
"blocks", st.processed, "txs", txs, "blobs", blobs, "mgas", float64(st.usedGas) / 1000000,
- "elapsed", common.PrettyDuration(elapsed), "mgasps", mgasps, "BAL", end.BAL() != nil,
+ "elapsed", common.PrettyDuration(elapsed), "mgasps", mgasps,
}
blockInsertMgaspsGauge.Update(int64(mgasps))
if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index e6e488c468..b69e6f8cc0 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -204,9 +204,9 @@ func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
sidecars := rawdb.ReadBlobSidecars(bc.db, hash, number)
block = block.WithSidecars(sidecars)
- bal := rawdb.ReadBAL(bc.db, hash, number)
- if bal != nil {
- block = block.WithBAL(bal)
+ blockAccessList := rawdb.ReadBlockAccessList(bc.db, hash, number)
+ if blockAccessList != nil {
+ block = block.WithAccessList(blockAccessList)
}
// Cache the found block for next time and return
bc.blockCache.Add(block.Hash(), block)
@@ -481,9 +481,6 @@ func (bc *BlockChain) State() (*state.StateDB, error) {
// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
stateDb, err := state.New(root, bc.statedb)
- if bc.cfg.EnableBAL {
- stateDb.InitBlockAccessList()
- }
if err != nil {
return nil, err
}
@@ -506,9 +503,6 @@ func (bc *BlockChain) StateWithCacheAt(root common.Hash) (*state.StateDB, error)
return nil, err
}
stateDb, err := state.NewWithReader(root, bc.statedb, process)
- if bc.cfg.EnableBAL {
- stateDb.InitBlockAccessList()
- }
if err != nil {
return nil, err
}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 1d67d54e96..817eb66d31 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -3020,7 +3020,7 @@ func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) {
e.exist = false
e.values = nil
}
- //t.Logf("block %d; adding destruct\n", e.blocknum)
+ // t.Logf("block %d; adding destruct\n", e.blocknum)
return tx
}
var newResurrect = func(e *expectation, b *BlockGen) *types.Transaction {
@@ -3031,7 +3031,7 @@ func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) {
e.exist = true
e.values = map[int]int{3: e.blocknum + 1, 4: 4}
}
- //t.Logf("block %d; adding resurrect\n", e.blocknum)
+ // t.Logf("block %d; adding resurrect\n", e.blocknum)
return tx
}
@@ -3061,8 +3061,8 @@ func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) {
// Import the canonical chain
options := DefaultConfig().WithStateScheme(scheme)
options.VmConfig = vm.Config{
- //Debug: true,
- //Tracer: vm.NewJSONLogger(nil, os.Stdout),
+ // Debug: true,
+ // Tracer: vm.NewJSONLogger(nil, os.Stdout),
}
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, options)
if err != nil {
@@ -3201,8 +3201,8 @@ func testInitThenFailCreateContract(t *testing.T, scheme string) {
// Import the canonical chain
options := DefaultConfig().WithStateScheme(scheme)
options.VmConfig = vm.Config{
- //Debug: true,
- //Tracer: vm.NewJSONLogger(nil, os.Stdout),
+ // Debug: true,
+ // Tracer: vm.NewJSONLogger(nil, os.Stdout),
}
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), gspec, engine, options)
if err != nil {
@@ -4130,7 +4130,7 @@ func (c *mockParlia) Finalize(chain consensus.ChainHeaderReader, header *types.H
return
}
-func (c *mockParlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks) (*types.Block, []*types.Receipt, error) {
+func (c *mockParlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks, finalize func()) (*types.Block, []*types.Receipt, error) {
// Finalize block
c.Finalize(chain, header, state, &body.Transactions, body.Uncles, body.Withdrawals, nil, nil, nil, tracer)
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 408de88152..f185ec1a9d 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -436,7 +436,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
body := types.Body{Transactions: b.txs, Uncles: b.uncles, Withdrawals: b.withdrawals}
- block, _, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts, nil)
+ block, _, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, &body, b.receipts, nil, nil)
if err != nil {
panic(err)
}
@@ -553,7 +553,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
Uncles: b.uncles,
Withdrawals: b.withdrawals,
}
- block, _, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, body, b.receipts, nil)
+ block, _, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, body, b.receipts, nil, nil)
if err != nil {
panic(err)
}
diff --git a/core/data_availability.go b/core/data_availability.go
index 1f60efffa3..6ec552d7a7 100644
--- a/core/data_availability.go
+++ b/core/data_availability.go
@@ -60,7 +60,7 @@ func IsDataAvailable(chain consensus.ChainHeaderReader, block *types.Block) (err
// refer logic in ValidateBody
if !chain.Config().IsCancun(block.Number(), block.Time()) {
- if len(block.Sidecars()) != 0 {
+ if block.Sidecars() != nil {
return errors.New("sidecars present in block body before cancun")
}
return nil
diff --git a/core/gen_genesis.go b/core/gen_genesis.go
index 2028f98edc..d3e7313eae 100644
--- a/core/gen_genesis.go
+++ b/core/gen_genesis.go
@@ -19,21 +19,22 @@ var _ = (*genesisSpecMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (g Genesis) MarshalJSON() ([]byte, error) {
type Genesis struct {
- Config *params.ChainConfig `json:"config"`
- Nonce math.HexOrDecimal64 `json:"nonce"`
- Timestamp math.HexOrDecimal64 `json:"timestamp"`
- ExtraData hexutil.Bytes `json:"extraData"`
- GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
- Mixhash common.Hash `json:"mixHash"`
- Coinbase common.Address `json:"coinbase"`
- Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
- Number math.HexOrDecimal64 `json:"number"`
- GasUsed math.HexOrDecimal64 `json:"gasUsed"`
- ParentHash common.Hash `json:"parentHash"`
- BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
- ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
- BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
+ Config *params.ChainConfig `json:"config"`
+ Nonce math.HexOrDecimal64 `json:"nonce"`
+ Timestamp math.HexOrDecimal64 `json:"timestamp"`
+ ExtraData hexutil.Bytes `json:"extraData"`
+ GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
+ Mixhash common.Hash `json:"mixHash"`
+ Coinbase common.Address `json:"coinbase"`
+ Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
+ Number math.HexOrDecimal64 `json:"number"`
+ GasUsed math.HexOrDecimal64 `json:"gasUsed"`
+ ParentHash common.Hash `json:"parentHash"`
+ BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
+ BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
}
var enc Genesis
enc.Config = g.Config
@@ -56,27 +57,29 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee)
enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas)
enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed)
+ enc.BlockAccessListHash = g.BlockAccessListHash
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (g *Genesis) UnmarshalJSON(input []byte) error {
type Genesis struct {
- Config *params.ChainConfig `json:"config"`
- Nonce *math.HexOrDecimal64 `json:"nonce"`
- Timestamp *math.HexOrDecimal64 `json:"timestamp"`
- ExtraData *hexutil.Bytes `json:"extraData"`
- GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
- Mixhash *common.Hash `json:"mixHash"`
- Coinbase *common.Address `json:"coinbase"`
- Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
- Number *math.HexOrDecimal64 `json:"number"`
- GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
- ParentHash *common.Hash `json:"parentHash"`
- BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
- ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
- BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
+ Config *params.ChainConfig `json:"config"`
+ Nonce *math.HexOrDecimal64 `json:"nonce"`
+ Timestamp *math.HexOrDecimal64 `json:"timestamp"`
+ ExtraData *hexutil.Bytes `json:"extraData"`
+ GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
+ Mixhash *common.Hash `json:"mixHash"`
+ Coinbase *common.Address `json:"coinbase"`
+ Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
+ Number *math.HexOrDecimal64 `json:"number"`
+ GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
+ ParentHash *common.Hash `json:"parentHash"`
+ BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
+ BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
}
var dec Genesis
if err := json.Unmarshal(input, &dec); err != nil {
@@ -133,5 +136,8 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.BlobGasUsed != nil {
g.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
}
+ if dec.BlockAccessListHash != nil {
+ g.BlockAccessListHash = dec.BlockAccessListHash
+ }
return nil
}
diff --git a/core/genesis.go b/core/genesis.go
index 883db7d751..7f67afe041 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -67,12 +67,13 @@ type Genesis struct {
// These fields are used for consensus tests. Please don't use them
// in actual genesis blocks.
- Number uint64 `json:"number"`
- GasUsed uint64 `json:"gasUsed"`
- ParentHash common.Hash `json:"parentHash"`
- BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
- ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
- BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
+ Number uint64 `json:"number"`
+ GasUsed uint64 `json:"gasUsed"`
+ ParentHash common.Hash `json:"parentHash"`
+ BaseFee *big.Int `json:"baseFeePerGas"` // EIP-1559
+ ExcessBlobGas *uint64 `json:"excessBlobGas"` // EIP-4844
+ BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
+ BlockAccessListHash *common.Hash `json:"blockAccessListHash,omitempty"`
}
// copy copies the genesis.
@@ -153,7 +154,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
if account.Balance != nil {
statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance)
}
- statedb.SetCode(addr, account.Code)
+ statedb.SetCode(addr, account.Code, tracing.CodeChangeUnspecified)
statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
@@ -189,7 +190,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e
// already captures the allocations.
statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance)
}
- statedb.SetCode(addr, account.Code)
+ statedb.SetCode(addr, account.Code, tracing.CodeChangeUnspecified)
statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
diff --git a/core/headerchain.go b/core/headerchain.go
index bf47922b63..b8736ca70a 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@@ -72,6 +73,16 @@ type HeaderChain struct {
engine consensus.Engine
}
+// SignBlockAccessList implements consensus.ChainHeaderReader.
+func (hc *HeaderChain) SignBlockAccessList(blockAccessList *bal.BlockAccessList) error {
+ return nil
+}
+
+// VerifyBlockAccessList implements consensus.ChainHeaderReader.
+func (hc *HeaderChain) VerifyBlockAccessList(block *types.Block, bal *bal.BlockAccessList) error {
+ return nil
+}
+
// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
// to the parent's interrupt semaphore.
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
diff --git a/core/parallel_state_processor.go b/core/parallel_state_processor.go
new file mode 100644
index 0000000000..e58053ba47
--- /dev/null
+++ b/core/parallel_state_processor.go
@@ -0,0 +1,441 @@
+package core
+
+import (
+ "cmp"
+ "errors"
+ "fmt"
+ "slices"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/misc"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/systemcontracts"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/types/bal"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+ "golang.org/x/sync/errgroup"
+)
+
+// ProcessResultWithMetrics wraps ProcessResult with some metrics that are
+// emitted when executing blocks containing access lists.
+type ProcessResultWithMetrics struct {
+ ProcessResult *ProcessResult
+ // the time it took to load modified prestate accounts from disk and instantiate statedbs for execution
+ PreProcessTime time.Duration
+ // the time it took to validate the block post transaction execution and state root calculation
+ PostProcessTime time.Duration
+ // the time it took to hash the state root, including intermediate node reads
+ RootCalcTime time.Duration
+ // the time that it took to load the prestate for accounts that were updated as part of
+ // the state root update
+ PrestateLoadTime time.Duration
+ // the time it took to execute all txs in the block
+ ExecTime time.Duration
+}
+
+// ParallelStateProcessor is used to execute and verify blocks containing
+// access lists.
+type ParallelStateProcessor struct {
+ *StateProcessor
+ vmCfg *vm.Config
+}
+
+// NewParallelStateProcessor returns a new ParallelStateProcessor instance.
+func NewParallelStateProcessor(config *params.ChainConfig, chain *HeaderChain, cfg *vm.Config) ParallelStateProcessor {
+ res := NewStateProcessor(config, chain)
+ return ParallelStateProcessor{
+ res,
+ cfg,
+ }
+}
+
+// called by resultHandler when all transactions have successfully executed.
+// performs post-tx state transition (system contracts and withdrawals)
+// and calculates the ProcessResult, returning it to be sent on resCh
+// by resultHandler
+func (p *ParallelStateProcessor) prepareExecResult(block *types.Block, allStateReads *bal.StateAccesses, tExecStart time.Time, postTxState *state.StateDB, receipts types.Receipts, cfg vm.Config) *ProcessResultWithMetrics {
+ tExec := time.Since(tExecStart)
+ var requests [][]byte
+ tPostprocessStart := time.Now()
+ header := block.Header()
+
+ balTracer, hooks := NewBlockAccessListTracer()
+ tracingStateDB := state.NewHookedState(postTxState, hooks)
+ context := NewEVMBlockContext(header, p.chain, nil)
+
+ evm := vm.NewEVM(context, tracingStateDB, p.config, *p.vmCfg)
+
+ // 1. order the receipts by tx index
+ // 2. correctly calculate the cumulative gas used per receipt, returning bad block error if it goes over the allowed
+ slices.SortFunc(receipts, func(a, b *types.Receipt) int {
+ return cmp.Compare(a.TransactionIndex, b.TransactionIndex)
+ })
+
+ var cumulativeGasUsed uint64
+ var allLogs []*types.Log
+ for _, receipt := range receipts {
+ receipt.CumulativeGasUsed = cumulativeGasUsed + receipt.GasUsed
+ cumulativeGasUsed += receipt.GasUsed
+ if receipt.CumulativeGasUsed > header.GasLimit {
+ return &ProcessResultWithMetrics{
+ ProcessResult: &ProcessResult{Error: fmt.Errorf("gas limit exceeded")},
+ }
+ }
+ }
+
+ // Read requests if Prague is enabled.
+ if p.config.IsPrague(block.Number(), block.Time()) && p.chain.config.Parlia == nil {
+ requests = [][]byte{}
+ // EIP-6110
+ if err := ParseDepositLogs(&requests, allLogs, p.config); err != nil {
+ return &ProcessResultWithMetrics{
+ ProcessResult: &ProcessResult{Error: err},
+ }
+ }
+
+ // EIP-7002
+ err := ProcessWithdrawalQueue(&requests, evm)
+ if err != nil {
+ return &ProcessResultWithMetrics{
+ ProcessResult: &ProcessResult{Error: err},
+ }
+ }
+
+ // EIP-7251
+ err = ProcessConsolidationQueue(&requests, evm)
+ if err != nil {
+ return &ProcessResultWithMetrics{
+ ProcessResult: &ProcessResult{Error: err},
+ }
+ }
+ }
+
+ // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
+ // Classify transactions into common and system transactions
+ posa, isPoSA := p.chain.engine.(consensus.PoSA)
+ commonTxs := make([]*types.Transaction, 0, len(block.Transactions()))
+ systemTxs := make([]*types.Transaction, 0, 2) // usually 2 system txs: validator set + system reward
+
+ for _, tx := range block.Transactions() {
+ if isPoSA {
+ if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil {
+ return &ProcessResultWithMetrics{
+ ProcessResult: &ProcessResult{Error: fmt.Errorf("could not check if tx is system tx [%v]: %w", tx.Hash().Hex(), err)},
+ }
+ } else if isSystemTx {
+ systemTxs = append(systemTxs, tx)
+ continue
+ }
+ }
+ commonTxs = append(commonTxs, tx)
+ }
+ systemTxCount := len(systemTxs)
+
+ var usedGas uint64 = cumulativeGasUsed
+ err := p.chain.engine.Finalize(p.chain, header, tracingStateDB, &commonTxs, block.Uncles(), block.Withdrawals(), (*[]*types.Receipt)(&receipts), &systemTxs, &usedGas, cfg.Tracer)
+ if err != nil {
+ log.Error("Finalize failed", "error", err.Error())
+ }
+ // invoke Finalise so that withdrawals are accounted for in the state diff
+ postTxState.Finalise(true)
+
+ balTracer.OnBlockFinalization()
+ diff, stateReads := balTracer.builder.FinalizedIdxChanges()
+ allStateReads.Merge(stateReads)
+ balIdx := len(block.Transactions()) - systemTxCount + 1
+ if err := postTxState.BlockAccessList().ValidateStateDiffRange(balIdx, len(block.Transactions())+1, diff); err != nil {
+ log.Error("validate state diff on post-tx", "idx", balIdx, "err", err)
+ return &ProcessResultWithMetrics{
+ ProcessResult: &ProcessResult{Error: fmt.Errorf("validate state diff on post-tx: idx %d, err %w", balIdx, err)},
+ }
+ }
+
+ if err := postTxState.BlockAccessList().ValidateStateReads(*allStateReads); err != nil {
+ return &ProcessResultWithMetrics{
+ ProcessResult: &ProcessResult{Error: err},
+ }
+ }
+
+ tPostprocess := time.Since(tPostprocessStart)
+
+ for _, receipt := range receipts {
+ allLogs = append(allLogs, receipt.Logs...)
+ }
+
+ return &ProcessResultWithMetrics{
+ ProcessResult: &ProcessResult{
+ Receipts: receipts,
+ Requests: requests,
+ Logs: allLogs,
+ GasUsed: usedGas,
+ },
+ PostProcessTime: tPostprocess,
+ ExecTime: tExec,
+ }
+}
+
+type txExecResult struct {
+ idx int // transaction index
+ receipt *types.Receipt
+ err error // non-EVM error which would render the block invalid
+
+ stateReads bal.StateAccesses
+}
+
+type txExecRequest struct {
+ idx int
+ balIdx int
+ tx *types.Transaction
+}
+
+// resultHandler polls until all transactions have finished executing and the
+// state root calculation is complete. The result is emitted on resCh.
+func (p *ParallelStateProcessor) resultHandler(block *types.Block, preTxStateReads bal.StateAccesses, postTxState *state.StateDB, tExecStart time.Time, txResCh <-chan txExecResult, stateRootCalcResCh <-chan stateRootCalculationResult, resCh chan *ProcessResultWithMetrics, cfg vm.Config, expectedResults int) {
+ // 1. if the block has transactions, receive the execution results from all of them and return an error on resCh if any txs err'd
+ // 2. once all txs are executed, compute the post-tx state transition and produce the ProcessResult sending it on resCh (or an error if the post-tx state didn't match what is reported in the BAL)
+ var receipts []*types.Receipt
+ gp := new(GasPool)
+ gp.SetGas(block.GasLimit())
+ var execErr error
+ var numTxComplete int
+
+ allReads := make(bal.StateAccesses)
+ allReads.Merge(preTxStateReads)
+ if expectedResults > 0 {
+ loop:
+ for {
+ select {
+ case res := <-txResCh:
+ if execErr == nil {
+ if res.err != nil {
+ execErr = res.err
+ } else {
+ if err := gp.SubGas(res.receipt.GasUsed); err != nil {
+ execErr = err
+ } else {
+ receipts = append(receipts, res.receipt)
+ allReads.Merge(res.stateReads)
+ }
+ }
+ }
+ numTxComplete++
+ if numTxComplete == expectedResults {
+ break loop
+ }
+ }
+ }
+
+ if execErr != nil {
+ resCh <- &ProcessResultWithMetrics{ProcessResult: &ProcessResult{Error: execErr}}
+ return
+ }
+ }
+
+ execResults := p.prepareExecResult(block, &allReads, tExecStart, postTxState, receipts, cfg)
+ rootCalcRes := <-stateRootCalcResCh
+
+ if execResults.ProcessResult.Error != nil {
+ resCh <- execResults
+ } else if rootCalcRes.err != nil {
+ resCh <- &ProcessResultWithMetrics{ProcessResult: &ProcessResult{Error: rootCalcRes.err}}
+ } else {
+ execResults.RootCalcTime = rootCalcRes.rootCalcTime
+ execResults.PrestateLoadTime = rootCalcRes.prestateLoadTime
+ resCh <- execResults
+ }
+}
+
+type stateRootCalculationResult struct {
+ err error
+ prestateLoadTime time.Duration
+ rootCalcTime time.Duration
+ root common.Hash
+}
+
+// calcAndVerifyRoot performs the post-state root hash calculation, verifying
+// it against what is reported by the block and returning a result on resCh.
+func (p *ParallelStateProcessor) calcAndVerifyRoot(preState *state.StateDB, block *types.Block, resCh chan stateRootCalculationResult) {
+ // calculate and apply the block state modifications
+ root, prestateLoadTime, rootCalcTime := preState.BlockAccessList().StateRoot(preState)
+
+ res := stateRootCalculationResult{
+ root: root,
+ prestateLoadTime: prestateLoadTime,
+ rootCalcTime: rootCalcTime,
+ }
+
+ if root != block.Root() {
+ res.err = fmt.Errorf("state root mismatch. local: %x. remote: %x, db error: %v", root, block.Root(), preState.Error())
+ }
+ resCh <- res
+}
+
+// execTx executes single transaction returning a result which includes state accessed/modified
+func (p *ParallelStateProcessor) execTx(block *types.Block, tx *types.Transaction, idx int, balIdx int, db *state.StateDB, signer types.Signer) *txExecResult {
+ header := block.Header()
+ balTracer, hooks := NewBlockAccessListTracer()
+ tracingStateDB := state.NewHookedState(db, hooks)
+ context := NewEVMBlockContext(header, p.chain, nil)
+ cfg := vm.Config{
+ Tracer: hooks,
+ NoBaseFee: p.vmCfg.NoBaseFee,
+ EnablePreimageRecording: p.vmCfg.EnablePreimageRecording,
+ ExtraEips: slices.Clone(p.vmCfg.ExtraEips),
+ StatelessSelfValidation: p.vmCfg.StatelessSelfValidation,
+ }
+ cfg.Tracer = hooks
+ evm := vm.NewEVM(context, tracingStateDB, p.config, cfg)
+
+ msg, err := TransactionToMessage(tx, signer, header.BaseFee)
+ if err != nil {
+ err = fmt.Errorf("could not apply tx %d [%v]: %w", idx, tx.Hash().Hex(), err)
+ return &txExecResult{err: err}
+ }
+ sender, _ := types.Sender(signer, tx)
+ db.SetTxSender(sender)
+ db.SetTxContext(tx.Hash(), idx)
+ db.SetAccessListIndex(balIdx)
+
+ gp := new(GasPool)
+ gp.SetGas(block.GasLimit())
+ var gasUsed uint64
+ receipt, err := ApplyTransactionWithEVM(msg, gp, db, block.Number(), block.Hash(), context.Time, tx, &gasUsed, evm)
+ if err != nil {
+ err := fmt.Errorf("could not apply tx %d [%v]: %w", idx, tx.Hash().Hex(), err)
+ return &txExecResult{err: err}
+ }
+ diff, accesses := balTracer.builder.FinalizedIdxChanges()
+ if err := db.BlockAccessList().ValidateStateDiff(balIdx, diff); err != nil {
+ return &txExecResult{err: fmt.Errorf("validate state diff on tx: idx %d, balIdx %d, err %w", idx+1, balIdx, err)}
+ }
+
+ receipt.Bloom = types.CreateBloom(receipt)
+ return &txExecResult{
+ idx: idx,
+ receipt: receipt,
+ stateReads: accesses,
+ }
+}
+
+// Process performs EVM execution and state root computation for a block which is known
+// to contain an access list.
+func (p *ParallelStateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (*ProcessResultWithMetrics, error) {
+ var (
+ header = block.Header()
+ resCh = make(chan *ProcessResultWithMetrics)
+ signer = types.MakeSigner(p.config, header.Number, header.Time)
+ )
+
+ txResCh := make(chan txExecResult)
+ pStart := time.Now()
+ var (
+ tPreprocess time.Duration // time to create a set of prestates for parallel transaction execution
+ tExecStart time.Time
+ rootCalcResultCh = make(chan stateRootCalculationResult)
+ )
+
+ // Mutate the block and state according to any hard-fork specs
+ if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
+ misc.ApplyDAOHardFork(statedb)
+ }
+ alReader := state.NewBALReader(block, statedb)
+ statedb.SetBlockAccessList(alReader)
+
+ var (
+ context vm.BlockContext
+ )
+ log.Debug("parallel state processor", "block", block.Number(), "hash", block.Hash(), "signData", common.Bytes2Hex(block.AccessList().SignData))
+
+ balTracer, hooks := NewBlockAccessListTracer()
+ tracingStateDB := state.NewHookedState(statedb, hooks)
+ originalStateDB := statedb.Copy()
+ // TODO: figure out exactly why we need to set the hooks on the TracingStateDB and the vm.Config
+ cfg = vm.Config{
+ Tracer: hooks,
+ NoBaseFee: p.vmCfg.NoBaseFee,
+ EnablePreimageRecording: p.vmCfg.EnablePreimageRecording,
+ ExtraEips: slices.Clone(p.vmCfg.ExtraEips),
+ StatelessSelfValidation: p.vmCfg.StatelessSelfValidation,
+ }
+ cfg.Tracer = hooks
+
+ context = NewEVMBlockContext(header, p.chain, nil)
+ evm := vm.NewEVM(context, tracingStateDB, p.chain.config, cfg)
+
+ lastBlock := p.chain.GetHeaderByHash(block.ParentHash())
+ if lastBlock == nil {
+ return nil, errors.New("could not get parent block")
+ }
+ // Handle upgrade built-in system contract code
+ systemcontracts.TryUpdateBuildInSystemContract(p.config, block.Number(), lastBlock.Time, block.Time(), tracingStateDB, true)
+
+ if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
+ ProcessBeaconBlockRoot(*beaconRoot, evm)
+ }
+ if p.config.IsPrague(block.Number(), block.Time()) || p.config.IsVerkle(block.Number(), block.Time()) {
+ ProcessParentBlockHash(block.ParentHash(), evm)
+ }
+ // TODO: weird that I have to manually call finalize here
+ balTracer.OnPreTxExecutionDone()
+
+ diff, stateReads := balTracer.builder.FinalizedIdxChanges()
+ if err := statedb.BlockAccessList().ValidateStateDiff(0, diff); err != nil {
+ return nil, fmt.Errorf("validate state diff on pre-tx: idx 0 , err %w", err)
+ }
+ // compute the post-tx state prestate (before applying final block system calls and eip-4895 withdrawals)
+ // the post-tx state transition is verified by resultHandler
+ postTxState := originalStateDB.Copy()
+
+ posa, isPoSA := p.chain.engine.(consensus.PoSA)
+ var systemTxCount int
+ execJobs := make([]txExecRequest, 0, len(block.Transactions())-systemTxCount)
+ for i, tx := range block.Transactions() {
+ if isPoSA {
+ isSystemTx, err := posa.IsSystemTransaction(tx, header)
+ if err != nil {
+ return nil, fmt.Errorf("could not check if tx is system tx [%v]: %w", tx.Hash().Hex(), err)
+ }
+ if isSystemTx {
+ systemTxCount++
+ continue
+ }
+ balIdx := i - systemTxCount + 1
+ execJobs = append(execJobs, txExecRequest{idx: i, balIdx: balIdx, tx: tx})
+ }
+ if p.config.IsCancun(block.Number(), block.Time()) && systemTxCount > 0 {
+ return nil, fmt.Errorf("normal tx %d [%v] after systemTx", i, tx.Hash().Hex())
+ }
+ }
+
+ postTxState.SetAccessListIndex(len(block.Transactions()) - systemTxCount + 1)
+ tPreprocess = time.Since(pStart)
+ // execute transactions and state root calculation in parallel
+
+ tExecStart = time.Now()
+
+ expectedResults := len(execJobs)
+ go p.resultHandler(block, stateReads, postTxState, tExecStart, txResCh, rootCalcResultCh, resCh, cfg, expectedResults)
+ var workers errgroup.Group
+ startingState := originalStateDB.Copy()
+ for _, job := range execJobs {
+ workers.Go(func() error {
+ res := p.execTx(block, job.tx, job.idx, job.balIdx, startingState.Copy(), signer)
+ txResCh <- *res
+ return nil
+ })
+ }
+
+ go p.calcAndVerifyRoot(statedb, block, rootCalcResultCh)
+
+ res := <-resCh
+ if res.ProcessResult.Error != nil {
+ return nil, res.ProcessResult.Error
+ }
+ res.PreProcessTime = tPreprocess
+ // res.PreProcessLoadTime = tPreprocessLoad
+ return res, nil
+}
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index d7250128d5..b0b979ec4b 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -918,6 +918,47 @@ func ReadBlobSidecarsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.R
return data
}
+// ReadBlockAccessListRLP retrieves all the block access list belonging to a block in RLP encoding.
+func ReadBlockAccessListRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+ // BAL is only in kv DB, will not be put into ancient DB
+ data, _ := db.Get(blockAccessListKey(number, hash))
+ return data
+}
+
+// ReadBlockAccessList retrieves the block access list belonging to a block.
+func ReadBlockAccessList(db ethdb.Reader, hash common.Hash, number uint64) *types.BlockAccessListEncode {
+ data := ReadBlockAccessListRLP(db, hash, number)
+ if len(data) == 0 {
+ return nil
+ }
+ var ret types.BlockAccessListEncode
+ if err := rlp.DecodeBytes(data, &ret); err != nil {
+ log.Error("Invalid block access list RLP", "hash", hash, "err", err, "data len", len(data))
+ return nil
+ }
+ return &ret
+}
+
+func WriteBlockAccessList(db ethdb.KeyValueWriter, hash common.Hash, number uint64, bal *types.BlockAccessListEncode) {
+ if bal == nil {
+ return
+ }
+ data, err := rlp.EncodeToBytes(bal)
+ if err != nil {
+ log.Crit("Failed to encode block access list", "err", err)
+ }
+
+ if err := db.Put(blockAccessListKey(number, hash), data); err != nil {
+ log.Crit("Failed to store block access list", "err", err)
+ }
+}
+
+func DeleteBlockAccessList(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
+ if err := db.Delete(blockAccessListKey(number, hash)); err != nil {
+ log.Crit("Failed to delete block access list", "err", err)
+ }
+}
+
// ReadBlobSidecars retrieves all the transaction blobs belonging to a block.
func ReadBlobSidecars(db ethdb.Reader, hash common.Hash, number uint64) types.BlobSidecars {
data := ReadBlobSidecarsRLP(db, hash, number)
@@ -960,47 +1001,6 @@ func DeleteBlobSidecars(db ethdb.KeyValueWriter, hash common.Hash, number uint64
}
}
-// ReadBALRLP retrieves all the block access list belonging to a block in RLP encoding.
-func ReadBALRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
- // BAL is only in kv DB, will not be put into ancient DB
- data, _ := db.Get(blockBALKey(number, hash))
- return data
-}
-
-// ReadBAL retrieves the block access list belonging to a block.
-func ReadBAL(db ethdb.Reader, hash common.Hash, number uint64) *types.BlockAccessListEncode {
- data := ReadBALRLP(db, hash, number)
- if len(data) == 0 {
- return nil
- }
- var ret types.BlockAccessListEncode
- if err := rlp.DecodeBytes(data, &ret); err != nil {
- log.Error("Invalid BAL RLP", "hash", hash, "err", err)
- return nil
- }
- return &ret
-}
-
-func WriteBAL(db ethdb.KeyValueWriter, hash common.Hash, number uint64, bal *types.BlockAccessListEncode) {
- if bal == nil {
- return
- }
- data, err := rlp.EncodeToBytes(bal)
- if err != nil {
- log.Crit("Failed to encode block BAL", "err", err)
- }
-
- if err := db.Put(blockBALKey(number, hash), data); err != nil {
- log.Crit("Failed to store block BAL", "err", err)
- }
-}
-
-func DeleteBAL(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
- if err := db.Delete(blockBALKey(number, hash)); err != nil {
- log.Crit("Failed to delete block BAL", "err", err)
- }
-}
-
// WriteAncientHeaderChain writes the supplied headers along with nil block
// bodies and receipts into the ancient store. It's supposed to be used for
// storing chain segment before the chain cutoff.
@@ -1043,7 +1043,6 @@ func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
DeleteBody(db, hash, number)
DeleteTd(db, hash, number)
DeleteBlobSidecars(db, hash, number) // it is safe to delete non-exist blob
- DeleteBAL(db, hash, number)
}
// DeleteBlockWithoutNumber removes all block data associated with a hash, except
@@ -1054,7 +1053,6 @@ func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
DeleteBody(db, hash, number)
DeleteTd(db, hash, number)
DeleteBlobSidecars(db, hash, number)
- DeleteBAL(db, hash, number)
}
const badBlockToKeep = 10
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 9b7cd9ae12..bc4f45a364 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -1122,376 +1122,3 @@ func TestHeadersRLPStorage(t *testing.T) {
checkSequence(1, 1) // Only block 1
checkSequence(1, 2) // Genesis + block 1
}
-
-// Tests BAL (Block Access List) storage and retrieval operations.
-func TestBALStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create test BAL data
- bal := &types.BlockAccessListEncode{
- Version: 1,
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{
- {
- TxIndex: 0,
- Address: common.HexToAddress("0x1234567890123456789012345678901234567890"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- {Key: common.HexToHash("0x02"), TxIndex: 1, Dirty: true},
- },
- },
- {
- TxIndex: 1,
- Address: common.HexToAddress("0x2222222222222222222222222222222222222222"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x03"), TxIndex: 2, Dirty: false},
- },
- },
- },
- }
-
- // Fill SignData with test data
- copy(bal.SignData, []byte("test_signature_data_for_bal_testing_12345678901234567890123456789"))
-
- hash := common.HexToHash("0x123456789abcdef")
- number := uint64(42)
-
- // Test non-existent BAL retrieval
- if entry := ReadBAL(db, hash, number); entry != nil {
- t.Fatalf("Non-existent BAL returned: %v", entry)
- }
- if entry := ReadBALRLP(db, hash, number); len(entry) != 0 {
- t.Fatalf("Non-existent raw BAL returned: %v", entry)
- }
-
- // Test BAL storage and retrieval
- WriteBAL(db, hash, number, bal)
- if entry := ReadBAL(db, hash, number); entry == nil {
- t.Fatalf("Stored BAL not found")
- } else if !balEqual(entry, bal) {
- t.Fatalf("Retrieved BAL mismatch: have %v, want %v", entry, bal)
- }
-
- // Test raw BAL retrieval
- if entry := ReadBALRLP(db, hash, number); len(entry) == 0 {
- t.Fatalf("Stored raw BAL not found")
- }
-
- // Test BAL deletion
- DeleteBAL(db, hash, number)
- if entry := ReadBAL(db, hash, number); entry != nil {
- t.Fatalf("Deleted BAL still returned: %v", entry)
- }
- if entry := ReadBALRLP(db, hash, number); len(entry) != 0 {
- t.Fatalf("Deleted raw BAL still returned: %v", entry)
- }
-}
-
-func TestBALRLPStorage(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Test different BAL configurations
- testCases := []struct {
- name string
- bal *types.BlockAccessListEncode
- hash common.Hash
- number uint64
- }{
- {
- name: "empty accounts",
- bal: &types.BlockAccessListEncode{
- Version: 0,
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{},
- },
- hash: common.HexToHash("0x1111"),
- number: 1,
- },
- {
- name: "single account with multiple storage items",
- bal: &types.BlockAccessListEncode{
- Version: 2,
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{
- {
- TxIndex: 0,
- Address: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x0a"), TxIndex: 0, Dirty: true},
- {Key: common.HexToHash("0x0b"), TxIndex: 1, Dirty: false},
- {Key: common.HexToHash("0x0c"), TxIndex: 2, Dirty: true},
- },
- },
- },
- },
- hash: common.HexToHash("0x2222"),
- number: 2,
- },
- {
- name: "multiple accounts",
- bal: &types.BlockAccessListEncode{
- Version: ^uint32(0), // Max uint32 value
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{
- {
- TxIndex: 0,
- Address: common.HexToAddress("0x1111111111111111111111111111111111111111"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- },
- },
- {
- TxIndex: 1,
- Address: common.HexToAddress("0x3333333333333333333333333333333333333333"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x04"), TxIndex: 3, Dirty: true},
- {Key: common.HexToHash("0x05"), TxIndex: 4, Dirty: false},
- },
- },
- {
- TxIndex: 2,
- Address: common.HexToAddress("0x4444444444444444444444444444444444444444"),
- StorageItems: []types.StorageAccessItem{},
- },
- },
- },
- hash: common.HexToHash("0x3333"),
- number: 100,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- // Fill SignData with unique test data
- sigData := fmt.Sprintf("test_signature_for_%s_123456789012345678901234567890123456789012345678901234567890", tc.name)
- copy(tc.bal.SignData, []byte(sigData))
-
- // Store BAL
- WriteBAL(db, tc.hash, tc.number, tc.bal)
-
- // Test RLP retrieval
- rawData := ReadBALRLP(db, tc.hash, tc.number)
- if len(rawData) == 0 {
- t.Fatalf("Failed to store/retrieve raw BAL data")
- }
-
- // Test structured retrieval
- retrieved := ReadBAL(db, tc.hash, tc.number)
- if retrieved == nil {
- t.Fatalf("Failed to retrieve structured BAL")
- }
-
- // Compare values
- if !balEqual(retrieved, tc.bal) {
- t.Fatalf("Retrieved BAL doesn't match stored BAL")
- }
-
- // Test deletion
- DeleteBAL(db, tc.hash, tc.number)
- if ReadBAL(db, tc.hash, tc.number) != nil {
- t.Fatalf("BAL not properly deleted")
- }
- })
- }
-}
-
-func TestBALCorruptedData(t *testing.T) {
- db := NewMemoryDatabase()
- hash := common.HexToHash("0x9999")
- number := uint64(123)
-
- // Store corrupted RLP data directly
- corruptedData := []byte{0xff, 0xff, 0xff, 0xff} // Invalid RLP
- if err := db.Put(blockBALKey(number, hash), corruptedData); err != nil {
- t.Fatalf("Failed to store corrupted data: %v", err)
- }
-
- // ReadBALRLP should return the corrupted data
- rawData := ReadBALRLP(db, hash, number)
- if !bytes.Equal(rawData, corruptedData) {
- t.Fatalf("ReadBALRLP should return raw data even if corrupted")
- }
-
- // ReadBAL should return nil for corrupted data
- bal := ReadBAL(db, hash, number)
- if bal != nil {
- t.Fatalf("ReadBAL should return nil for corrupted data, got: %v", bal)
- }
-}
-
-func TestBALLargeData(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Create BAL with large amount of data
- accounts := make([]types.AccountAccessListEncode, 1000)
- for i := 0; i < 1000; i++ {
- storageItems := make([]types.StorageAccessItem, 10)
- for j := 0; j < 10; j++ {
- storageItems[j] = types.StorageAccessItem{
- Key: common.BigToHash(big.NewInt(int64(i*10 + j))),
- TxIndex: uint32(i*10 + j),
- Dirty: (i+j)%2 == 0,
- }
- }
- accounts[i] = types.AccountAccessListEncode{
- TxIndex: uint32(i),
- Address: common.BigToAddress(big.NewInt(int64(i))),
- StorageItems: storageItems,
- }
- }
-
- bal := &types.BlockAccessListEncode{
- Version: 12345,
- SignData: make([]byte, 65),
- Accounts: accounts,
- }
-
- // Fill SignData
- copy(bal.SignData, []byte("large_data_test_signature_123456789012345678901234567890123456789"))
-
- hash := common.HexToHash("0xaaaa")
- number := uint64(999)
-
- // Test storage and retrieval of large data
- WriteBAL(db, hash, number, bal)
-
- retrieved := ReadBAL(db, hash, number)
- if retrieved == nil {
- t.Fatalf("Failed to retrieve large BAL data")
- }
-
- if !balEqual(retrieved, bal) {
- t.Fatalf("Large BAL data integrity check failed")
- }
-
- // Test deletion
- DeleteBAL(db, hash, number)
- if ReadBAL(db, hash, number) != nil {
- t.Fatalf("Large BAL data not properly deleted")
- }
-}
-
-func TestBALMultipleBlocks(t *testing.T) {
- db := NewMemoryDatabase()
-
- // Store BALs for multiple blocks
- blocks := []struct {
- hash common.Hash
- number uint64
- bal *types.BlockAccessListEncode
- }{
- {
- hash: common.HexToHash("0xaaaa"),
- number: 1,
- bal: &types.BlockAccessListEncode{
- Version: 1,
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{
- {
- TxIndex: 0,
- Address: common.HexToAddress("0x1111111111111111111111111111111111111111"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x01"), TxIndex: 0, Dirty: false},
- },
- },
- },
- },
- },
- {
- hash: common.HexToHash("0xbbbb"),
- number: 2,
- bal: &types.BlockAccessListEncode{
- Version: 2,
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{
- {
- TxIndex: 0,
- Address: common.HexToAddress("0x2222222222222222222222222222222222222222"),
- StorageItems: []types.StorageAccessItem{
- {Key: common.HexToHash("0x02"), TxIndex: 1, Dirty: true},
- },
- },
- },
- },
- },
- {
- hash: common.HexToHash("0xcccc"),
- number: 3,
- bal: &types.BlockAccessListEncode{
- Version: 3,
- SignData: make([]byte, 65),
- Accounts: []types.AccountAccessListEncode{},
- },
- },
- }
-
- // Store all BALs
- for i, block := range blocks {
- sigData := fmt.Sprintf("signature_for_block_%d_123456789012345678901234567890123456789012345678901234567890", i)
- copy(block.bal.SignData, []byte(sigData))
- WriteBAL(db, block.hash, block.number, block.bal)
- }
-
- // Verify all can be retrieved independently
- for i, block := range blocks {
- retrieved := ReadBAL(db, block.hash, block.number)
- if retrieved == nil {
- t.Fatalf("Failed to retrieve BAL for block %d", i)
- }
- if !balEqual(retrieved, block.bal) {
- t.Fatalf("BAL mismatch for block %d", i)
- }
- }
-
- // Delete middle block
- DeleteBAL(db, blocks[1].hash, blocks[1].number)
-
- // Verify first and third blocks still exist
- if ReadBAL(db, blocks[0].hash, blocks[0].number) == nil {
- t.Fatalf("Block 0 BAL was incorrectly deleted")
- }
- if ReadBAL(db, blocks[1].hash, blocks[1].number) != nil {
- t.Fatalf("Block 1 BAL was not deleted")
- }
- if ReadBAL(db, blocks[2].hash, blocks[2].number) == nil {
- t.Fatalf("Block 2 BAL was incorrectly deleted")
- }
-}
-
-// Helper function to compare two BlockAccessListEncode structs
-func balEqual(a, b *types.BlockAccessListEncode) bool {
- if a == nil && b == nil {
- return true
- }
- if a == nil || b == nil {
- return false
- }
- if a.Version != b.Version {
- return false
- }
- if !bytes.Equal(a.SignData, b.SignData) {
- return false
- }
- if len(a.Accounts) != len(b.Accounts) {
- return false
- }
- for i, accountA := range a.Accounts {
- accountB := b.Accounts[i]
- if accountA.TxIndex != accountB.TxIndex {
- return false
- }
- if accountA.Address != accountB.Address {
- return false
- }
- if len(accountA.StorageItems) != len(accountB.StorageItems) {
- return false
- }
- for j, storageA := range accountA.StorageItems {
- storageB := accountB.StorageItems[j]
- if storageA.Key != storageB.Key || storageA.TxIndex != storageB.TxIndex || storageA.Dirty != storageB.Dirty {
- return false
- }
- }
- }
- return true
-}
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 9af890216e..56d8ed6f37 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -652,7 +652,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
tds stat
numHashPairings stat
blobSidecars stat
- bals stat
hashNumPairings stat
legacyTries stat
stateLookups stat
@@ -707,8 +706,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
tds.Add(size)
case bytes.HasPrefix(key, BlockBlobSidecarsPrefix):
blobSidecars.Add(size)
- case bytes.HasPrefix(key, BlockBALPrefix):
- bals.Add(size)
case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
numHashPairings.Add(size)
case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
@@ -845,7 +842,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
{"Key-Value store", "Difficulties", tds.Size(), tds.Count()},
{"Key-Value store", "BlobSidecars", blobSidecars.Size(), blobSidecars.Count()},
- {"Key-Value store", "Block access list", bals.Size(), bals.Count()},
{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index beb97f5b42..d94cd2ecae 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -143,7 +143,7 @@ var (
BlockBlobSidecarsPrefix = []byte("blobs")
- BlockBALPrefix = []byte("bal") // blockBALPrefix + blockNumber (uint64 big endian) + blockHash -> block access list
+ BlockAccessListPrefix = []byte("bal")
// new log index
filterMapsPrefix = "fm-"
@@ -205,6 +205,11 @@ func blockBodyKey(number uint64, hash common.Hash) []byte {
return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
}
+// blockBALKey = blockBALPrefix + blockNumber (uint64 big endian) + blockHash
+func blockAccessListKey(number uint64, hash common.Hash) []byte {
+ return append(append(BlockAccessListPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
+}
+
// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash
func blockReceiptsKey(number uint64, hash common.Hash) []byte {
return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
@@ -215,11 +220,6 @@ func blockBlobSidecarsKey(number uint64, hash common.Hash) []byte {
return append(append(BlockBlobSidecarsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
}
-// blockBALKey = blockBALPrefix + blockNumber (uint64 big endian) + blockHash
-func blockBALKey(number uint64, hash common.Hash) []byte {
- return append(append(BlockBALPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
-}
-
// txLookupKey = txLookupPrefix + hash
func txLookupKey(hash common.Hash) []byte {
return append(txLookupPrefix, hash.Bytes()...)
diff --git a/core/state/bal_reader.go b/core/state/bal_reader.go
new file mode 100644
index 0000000000..38d0f0a6d8
--- /dev/null
+++ b/core/state/bal_reader.go
@@ -0,0 +1,464 @@
+package state
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/types/bal"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/holiman/uint256"
+)
+
+// TODO: probably unnecessary to cache the resolved state object here as it will already be in the db cache?
+// ^ experiment with the performance of keeping this as-is vs just using the db cache.
+type prestateResolver struct {
+ inProgress map[common.Address]chan struct{}
+ resolved sync.Map
+ ctx context.Context
+ cancel func()
+}
+
+func (p *prestateResolver) resolve(r Reader, addrs []common.Address) {
+ p.inProgress = make(map[common.Address]chan struct{})
+ p.ctx, p.cancel = context.WithCancel(context.Background())
+
+ for _, addr := range addrs {
+ p.inProgress[addr] = make(chan struct{})
+ }
+
+ for _, addr := range addrs {
+ resolveAddr := addr
+ go func() {
+ select {
+ case <-p.ctx.Done():
+ return
+ default:
+ }
+
+ acct, err := r.Account(resolveAddr)
+ if err != nil {
+ log.Error("Failed to get account", "address", resolveAddr, "error", err)
+ // TODO: what do here?
+ }
+ p.resolved.Store(resolveAddr, acct)
+ close(p.inProgress[resolveAddr])
+ }()
+ }
+}
+
+func (p *prestateResolver) account(addr common.Address) *types.StateAccount {
+ if _, ok := p.inProgress[addr]; !ok {
+ return nil
+ }
+
+ select {
+ case <-p.inProgress[addr]:
+ }
+ res, exist := p.resolved.Load(addr)
+ if !exist {
+ return nil
+ }
+ return res.(*types.StateAccount)
+}
+
+func (r *BALReader) initObjFromDiff(db *StateDB, addr common.Address, a *types.StateAccount, diff *bal.AccountMutations) *stateObject {
+ var acct *types.StateAccount
+ if a == nil {
+ acct = &types.StateAccount{
+ Nonce: 0,
+ Balance: uint256.NewInt(0),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash[:],
+ }
+ } else {
+ acct = a.Copy()
+ }
+ if diff == nil {
+ return newObject(db, addr, acct)
+ }
+
+ if diff.Nonce != nil {
+ acct.Nonce = *diff.Nonce
+ }
+ if diff.Balance != nil {
+ acct.Balance = new(uint256.Int).Set(diff.Balance)
+ }
+ obj := newObject(db, addr, acct)
+ if diff.Code != nil {
+ obj.setCode(crypto.Keccak256Hash(diff.Code), diff.Code)
+ }
+ if diff.StorageWrites != nil {
+ for key, val := range diff.StorageWrites {
+ obj.pendingStorage[key] = val
+ }
+ }
+ if obj.empty() {
+ return nil
+ }
+ return obj
+}
+
+func (r *BALReader) initMutatedObjFromDiff(db *StateDB, addr common.Address, a *types.StateAccount, diff *bal.AccountMutations) *stateObject {
+ var acct *types.StateAccount
+ if a == nil {
+ acct = &types.StateAccount{
+ Nonce: 0,
+ Balance: uint256.NewInt(0),
+ Root: types.EmptyRootHash,
+ CodeHash: types.EmptyCodeHash[:],
+ }
+ } else {
+ acct = a.Copy()
+ }
+ obj := newObject(db, addr, acct)
+ if diff.Nonce != nil {
+ obj.SetNonce(*diff.Nonce)
+ }
+ if diff.Balance != nil {
+ obj.SetBalance(new(uint256.Int).Set(diff.Balance))
+ }
+ if diff.Code != nil {
+ obj.SetCode(crypto.Keccak256Hash(diff.Code), diff.Code)
+ }
+ if diff.StorageWrites != nil {
+ for key, val := range diff.StorageWrites {
+ obj.SetState(key, val)
+ }
+ }
+ return obj
+}
+
+// BALReader provides methods for reading account state from a block access
+// list. State values returned from the Reader methods must not be modified.
+type BALReader struct {
+ block *types.Block
+ accesses map[common.Address]*bal.AccountAccess
+ prestateReader prestateResolver
+}
+
+// NewBALReader constructs a new reader from an access list. db is expected to have been instantiated with a reader.
+func NewBALReader(block *types.Block, db *StateDB) *BALReader {
+ r := &BALReader{accesses: make(map[common.Address]*bal.AccountAccess), block: block}
+ for _, acctDiff := range *block.AccessList().AccessList {
+ r.accesses[acctDiff.Address] = &acctDiff
+ }
+ r.prestateReader.resolve(db.Reader(), r.ModifiedAccounts())
+ return r
+}
+
+// ModifiedAccounts returns a list of all accounts with mutations in the access list
+func (r *BALReader) ModifiedAccounts() (res []common.Address) {
+ for addr, access := range r.accesses {
+ if len(access.NonceChanges) != 0 || len(access.CodeChanges) != 0 || len(access.StorageChanges) != 0 || len(access.BalanceChanges) != 0 {
+ res = append(res, addr)
+ }
+ }
+ return res
+}
+
+func (r *BALReader) ValidateStateReads(allReads bal.StateAccesses) error {
+ // 1. remove any slots from 'allReads' which were written
+ // 2. validate that the read set in the BAL matches 'allReads' exactly
+ for addr, reads := range allReads {
+ balAcctDiff := r.readAccountDiff(addr, len(r.block.Transactions())+2)
+ if balAcctDiff != nil {
+ for writeSlot := range balAcctDiff.StorageWrites {
+ delete(reads, writeSlot)
+ }
+ }
+ if _, ok := r.accesses[addr]; !ok {
+ return fmt.Errorf("%x wasn't in BAL", addr)
+ }
+
+ expectedReads := r.accesses[addr].StorageReads
+ if len(reads) != len(expectedReads) {
+ return fmt.Errorf("mismatch between the number of computed reads and number of expected reads")
+ }
+
+ for _, slot := range expectedReads {
+ if _, ok := reads[slot]; !ok {
+ return fmt.Errorf("expected read is missing from BAL")
+ }
+ }
+ }
+
+ // TODO: where do we validate that the storage read/write sets are distinct?
+
+ return nil
+}
+
+func (r *BALReader) AccessedState() (res map[common.Address]map[common.Hash]struct{}) {
+ res = make(map[common.Address]map[common.Hash]struct{})
+ for addr, accesses := range r.accesses {
+ if len(accesses.StorageReads) > 0 {
+ res[addr] = make(map[common.Hash]struct{})
+ for _, slot := range accesses.StorageReads {
+ res[addr][slot] = struct{}{}
+ }
+ } else if len(accesses.BalanceChanges) == 0 && len(accesses.NonceChanges) == 0 && len(accesses.StorageChanges) == 0 && len(accesses.CodeChanges) == 0 {
+ res[addr] = make(map[common.Hash]struct{})
+ }
+ }
+ return
+}
+
+// TODO: it feels weird that this modifies the prestate instance. However, it's needed because it will
+// subsequently be used in Commit.
+func (r *BALReader) StateRoot(prestate *StateDB) (root common.Hash, prestateLoadTime time.Duration, rootUpdateTime time.Duration) {
+ lastIdx := len(r.block.Transactions()) + 1
+ modifiedAccts := r.ModifiedAccounts()
+ startPrestateLoad := time.Now()
+ for _, addr := range modifiedAccts {
+ diff := r.readAccountDiff(addr, lastIdx)
+ acct := r.prestateReader.account(addr)
+ obj := r.initMutatedObjFromDiff(prestate, addr, acct, diff)
+ if obj != nil {
+ prestate.setStateObject(obj)
+ }
+ }
+ prestateLoadTime = time.Since(startPrestateLoad)
+ rootUpdateStart := time.Now()
+ root = prestate.IntermediateRoot(true)
+ rootUpdateTime = time.Since(rootUpdateStart)
+ return root, prestateLoadTime, rootUpdateTime
+}
+
+// changesAt returns all state changes at the given index.
+func (r *BALReader) changesAt(idx int) *bal.StateDiff {
+ res := &bal.StateDiff{Mutations: make(map[common.Address]*bal.AccountMutations)}
+ for addr, _ := range r.accesses {
+ accountChanges := r.accountChangesAt(addr, idx)
+ if accountChanges != nil {
+ res.Mutations[addr] = accountChanges
+ }
+ }
+ return res
+}
+
+// accountChangesAt returns the state changes of an account at a given index,
+// or nil if there are no changes.
+func (r *BALReader) accountChangesAt(addr common.Address, idx int) *bal.AccountMutations {
+ acct, exist := r.accesses[addr]
+ if !exist {
+ return nil
+ }
+
+ var res bal.AccountMutations
+
+ for i := len(acct.BalanceChanges) - 1; i >= 0; i-- {
+ if acct.BalanceChanges[i].TxIdx == uint16(idx) {
+ res.Balance = acct.BalanceChanges[i].Balance
+ }
+ if acct.BalanceChanges[i].TxIdx < uint16(idx) {
+ break
+ }
+ }
+
+ for i := len(acct.CodeChanges) - 1; i >= 0; i-- {
+ if acct.CodeChanges[i].TxIdx == uint16(idx) {
+ res.Code = acct.CodeChanges[i].Code
+ break
+ }
+ if acct.CodeChanges[i].TxIdx < uint16(idx) {
+ break
+ }
+ }
+
+ for i := len(acct.NonceChanges) - 1; i >= 0; i-- {
+ if acct.NonceChanges[i].TxIdx == uint16(idx) {
+ res.Nonce = &acct.NonceChanges[i].Nonce
+ break
+ }
+ if acct.NonceChanges[i].TxIdx < uint16(idx) {
+ break
+ }
+ }
+
+ for i := len(acct.StorageChanges) - 1; i >= 0; i-- {
+ if res.StorageWrites == nil {
+ res.StorageWrites = make(map[common.Hash]common.Hash)
+ }
+ slotWrites := acct.StorageChanges[i]
+
+ for j := len(slotWrites.Accesses) - 1; j >= 0; j-- {
+ if slotWrites.Accesses[j].TxIdx == uint16(idx) {
+ res.StorageWrites[slotWrites.Slot] = slotWrites.Accesses[j].ValueAfter
+ break
+ }
+ if slotWrites.Accesses[j].TxIdx < uint16(idx) {
+ break
+ }
+ }
+ if len(res.StorageWrites) == 0 {
+ res.StorageWrites = nil
+ }
+ }
+
+ if res.Code == nil && res.Nonce == nil && len(res.StorageWrites) == 0 && res.Balance == nil {
+ return nil
+ }
+ return &res
+}
+
+func (r *BALReader) isModified(addr common.Address) bool {
+ access, ok := r.accesses[addr]
+ if !ok {
+ return false
+ }
+ return len(access.StorageChanges) > 0 || len(access.BalanceChanges) > 0 || len(access.CodeChanges) > 0 || len(access.NonceChanges) > 0
+}
+
+func (r *BALReader) readAccount(db *StateDB, addr common.Address, idx int) *stateObject {
+ diff := r.readAccountDiff(addr, idx)
+ prestate := r.prestateReader.account(addr)
+ return r.initObjFromDiff(db, addr, prestate, diff)
+}
+
+// readAccountDiff returns the accumulated state changes of an account up through idx.
+func (r *BALReader) readAccountDiff(addr common.Address, idx int) *bal.AccountMutations {
+ diff, exist := r.accesses[addr]
+ if !exist {
+ return nil
+ }
+
+ var res bal.AccountMutations
+
+ for i := 0; i < len(diff.BalanceChanges) && diff.BalanceChanges[i].TxIdx <= uint16(idx); i++ {
+ res.Balance = diff.BalanceChanges[i].Balance
+ }
+
+ for i := 0; i < len(diff.CodeChanges) && diff.CodeChanges[i].TxIdx <= uint16(idx); i++ {
+ res.Code = diff.CodeChanges[i].Code
+ }
+
+ for i := 0; i < len(diff.NonceChanges) && diff.NonceChanges[i].TxIdx <= uint16(idx); i++ {
+ res.Nonce = &diff.NonceChanges[i].Nonce
+ }
+
+ if len(diff.StorageChanges) > 0 {
+ res.StorageWrites = make(map[common.Hash]common.Hash)
+ for _, slotWrites := range diff.StorageChanges {
+ for i := 0; i < len(slotWrites.Accesses) && slotWrites.Accesses[i].TxIdx <= uint16(idx); i++ {
+ res.StorageWrites[slotWrites.Slot] = slotWrites.Accesses[i].ValueAfter
+ }
+ }
+ }
+
+ return &res
+}
+
+func (r *BALReader) ValidateStateDiffRange(startIdx int, endIdx int, computedDiff *bal.StateDiff) error {
+ balChanges := &bal.StateDiff{Mutations: make(map[common.Address]*bal.AccountMutations)}
+ for idx := startIdx; idx <= endIdx; idx++ {
+ balChanges.Merge(r.changesAt(idx))
+ }
+ for addr, state := range balChanges.Mutations {
+ computedAccountDiff, ok := computedDiff.Mutations[addr]
+ if !ok {
+ return fmt.Errorf("BAL %d-%d contained account %x which wasn't present in computed state diff", startIdx, endIdx, addr)
+ }
+
+ if !state.Eq(computedAccountDiff) {
+ // 【添加详细日志】
+ log.Error("=== BAL value mismatch ===",
+ "startIdx", startIdx,
+ "endIdx", endIdx,
+ "address", addr.Hex(),
+ "block", r.block.Number())
+
+ // 比较 Balance
+ if state.Balance != nil || computedAccountDiff.Balance != nil {
+ balBalance := "nil"
+ if state.Balance != nil {
+ balBalance = state.Balance.String()
+ }
+ computedBalance := "nil"
+ if computedAccountDiff.Balance != nil {
+ computedBalance = computedAccountDiff.Balance.String()
+ }
+ log.Error(" Balance mismatch",
+ "startIdx", startIdx,
+ "endIdx", endIdx,
+ "address", addr.Hex(),
+ "BAL", balBalance,
+ "computed", computedBalance)
+ }
+
+ // 比较 Nonce
+ if state.Nonce != nil || computedAccountDiff.Nonce != nil {
+ balNonce := "nil"
+ if state.Nonce != nil {
+ balNonce = fmt.Sprintf("%d", *state.Nonce)
+ }
+ computedNonce := "nil"
+ if computedAccountDiff.Nonce != nil {
+ computedNonce = fmt.Sprintf("%d", *computedAccountDiff.Nonce)
+ }
+ log.Error(" Nonce mismatch",
+ "startIdx", startIdx,
+ "endIdx", endIdx,
+ "address", addr.Hex(),
+ "BAL", balNonce,
+ "computed", computedNonce)
+ }
+
+ // 比较 Storage
+ balStorageCount := 0
+ if state.StorageWrites != nil {
+ balStorageCount = len(state.StorageWrites)
+ }
+ computedStorageCount := 0
+ if computedAccountDiff.StorageWrites != nil {
+ computedStorageCount = len(computedAccountDiff.StorageWrites)
+ }
+ if balStorageCount != computedStorageCount {
+ log.Error(" Storage count mismatch",
+ "startIdx", startIdx,
+ "endIdx", endIdx,
+ "address", addr.Hex(),
+ "BAL_count", balStorageCount,
+ "computed_count", computedStorageCount)
+ }
+
+ return fmt.Errorf("difference between computed state diff and BAL %d-%d entry for account %x", startIdx, endIdx, addr)
+ }
+ }
+
+ if len(balChanges.Mutations) != len(computedDiff.Mutations) {
+ log.Error("Account count mismatch", "startIdx", startIdx,
+ "endIdx", endIdx,
+ "BAL_count", len(balChanges.Mutations),
+ "computed_count", len(computedDiff.Mutations))
+
+ balAccounts := make(map[common.Address]bool)
+ for addr := range balChanges.Mutations {
+ balAccounts[addr] = true
+ log.Error(" BAL has", "startIdx", startIdx,
+ "endIdx", endIdx,
+ "address", addr.Hex())
+ }
+
+ for addr := range computedDiff.Mutations {
+ if !balAccounts[addr] {
+ log.Error(" Computed has (NOT in BAL)", "startIdx", startIdx,
+ "endIdx", endIdx,
+ "address", addr.Hex())
+ }
+ }
+
+ return fmt.Errorf("computed state diff contained mutated accounts which weren't reported in BAL %d-%d", startIdx, endIdx)
+ }
+
+ return nil
+}
+
+// ValidateStateDiff returns an error if the computed state diff is not equal to
+// diff reported from the access list at the given index.
+func (r *BALReader) ValidateStateDiff(idx int, computedDiff *bal.StateDiff) error {
+ return r.ValidateStateDiffRange(idx, idx, computedDiff)
+}
diff --git a/core/state/database.go b/core/state/database.go
index cebebdd42c..04782f10f0 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -93,12 +93,18 @@ type Trie interface {
// in the trie with provided address.
UpdateAccount(address common.Address, account *types.StateAccount, codeLen int) error
+ // UpdateAccountBatch attempts to update a list accounts in the batch manner.
+ UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error
+
// UpdateStorage associates key with value in the trie. If value has length zero,
// any existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
// database, a trie.MissingNodeError is returned.
UpdateStorage(addr common.Address, key, value []byte) error
+ // UpdateStorageBatch attempts to update a list storages in the batch manner.
+ UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error
+
// DeleteAccount abstracts an account deletion from the trie.
DeleteAccount(address common.Address) error
diff --git a/core/state/journal.go b/core/state/journal.go
index 34d4058762..01b301e93f 100644
--- a/core/state/journal.go
+++ b/core/state/journal.go
@@ -404,7 +404,7 @@ func (ch nonceChange) copy() journalEntry {
}
func (ch codeChange) revert(s *StateDB) {
- s.getStateObject(ch.account).setCode(crypto.Keccak256Hash(ch.prevCode), ch.prevCode)
+ s.getStateObject(ch.account).setCodeModified(crypto.Keccak256Hash(ch.prevCode), ch.prevCode)
}
func (ch codeChange) dirtied() *common.Address {
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 1dadc9b822..e2df079845 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -54,6 +54,9 @@ type stateObject struct {
origin *types.StateAccount // Account original data without any change applied, nil means it was not existent
data types.StateAccount // Account data with all mutations applied in the scope of block
+ txPreBalance *uint256.Int // the account balance after the last call to finalise
+ txPreNonce uint64 // the account nonce after the last call to finalise
+
// Write caches.
trie Trie // storage trie, which becomes non-nil on first access
code []byte // contract bytecode, which gets set when code is loaded
@@ -76,6 +79,8 @@ type stateObject struct {
// Cache flags.
dirtyCode bool // true if the code was updated
+ nonFinalizedCode bool // true if the code was updated since the last call to finalise
+
// Flag whether the account was marked as self-destructed. The self-destructed
// account is still accessible in the scope of same transaction.
selfDestructed bool
@@ -85,6 +90,8 @@ type stateObject struct {
// the contract is just created within the current transaction, or when the
// object was previously existent and is being deployed as a contract within
// the current transaction.
+ //
+ // the flag is set upon beginning of contract initcode execution, not when the code is actually deployed to the address.
newContract bool
}
@@ -99,12 +106,17 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s
if acct == nil {
acct = types.NewEmptyStateAccount()
}
+ if acct.Balance == nil {
+ acct.Balance = new(uint256.Int)
+ }
return &stateObject{
db: db,
address: address,
addrHash: crypto.Keccak256Hash(address[:]),
origin: origin,
data: *acct,
+ txPreBalance: cloneBalance(acct.Balance),
+ txPreNonce: acct.Nonce,
originStorage: make(Storage),
dirtyStorage: make(Storage),
pendingStorage: make(Storage),
@@ -112,6 +124,13 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s
}
}
+func cloneBalance(balance *uint256.Int) *uint256.Int {
+ if balance == nil {
+ return new(uint256.Int)
+ }
+ return balance.Clone()
+}
+
func (s *stateObject) markSelfdestructed() {
s.selfDestructed = true
}
@@ -266,7 +285,7 @@ func (s *stateObject) finalise() {
delete(s.uncommittedStorage, key)
} else if exist {
// The slot is modified to another value and the slot has been
- // tracked for commit, do nothing here.
+ // tracked for commit in uncommittedStorage.
} else {
// The slot is different from its original value and hasn't been
// tracked for commit yet.
@@ -293,6 +312,11 @@ func (s *stateObject) finalise() {
// of the newly-created object as it's no longer eligible for self-destruct
// by EIP-6780. For non-newly-created objects, it's a no-op.
s.newContract = false
+
+ s.nonFinalizedCode = false
+
+ s.txPreBalance = cloneBalance(s.data.Balance)
+ s.txPreNonce = s.data.Nonce
}
// updateTrie is responsible for persisting cached storage changes into the
@@ -343,8 +367,10 @@ func (s *stateObject) updateTrie() (Trie, error) {
// into a shortnode. This requires `B` to be resolved from disk.
// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
var (
- deletions []common.Hash
- used = make([]common.Hash, 0, len(s.uncommittedStorage))
+ deletions []common.Hash
+ used = make([]common.Hash, 0, len(s.uncommittedStorage))
+ updateKeys [][]byte
+ updateValues [][]byte
)
for key, origin := range s.uncommittedStorage {
// Skip noop changes, persist actual changes
@@ -358,10 +384,8 @@ func (s *stateObject) updateTrie() (Trie, error) {
continue
}
if (value != common.Hash{}) {
- if err := tr.UpdateStorage(s.address, key[:], common.TrimLeftZeroes(value[:])); err != nil {
- s.db.setError(err)
- return nil, err
- }
+ updateKeys = append(updateKeys, key[:])
+ updateValues = append(updateValues, common.TrimLeftZeroes(value[:]))
s.db.StorageUpdated.Add(1)
} else {
deletions = append(deletions, key)
@@ -369,6 +393,12 @@ func (s *stateObject) updateTrie() (Trie, error) {
// Cache the items for preloading
used = append(used, key) // Copy needed for closure
}
+ if len(updateKeys) > 0 {
+ if err := tr.UpdateStorageBatch(s.address, updateKeys, updateValues); err != nil {
+ s.db.setError(err)
+ return nil, err
+ }
+ }
for _, key := range deletions {
if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err)
@@ -521,6 +551,8 @@ func (s *stateObject) deepCopy(db *StateDB) *stateObject {
dirtyCode: s.dirtyCode,
selfDestructed: s.selfDestructed,
newContract: s.newContract,
+ txPreNonce: s.txPreNonce,
+ txPreBalance: cloneBalance(s.txPreBalance),
}
if s.trie != nil {
obj.trie = mustCopyTrie(s.trie)
@@ -579,15 +611,21 @@ func (s *stateObject) CodeSize() int {
func (s *stateObject) SetCode(codeHash common.Hash, code []byte) (prev []byte) {
prev = slices.Clone(s.code)
s.db.journal.setCode(s.address, prev)
- s.setCode(codeHash, code)
+ s.setCodeModified(codeHash, code)
return prev
}
func (s *stateObject) setCode(codeHash common.Hash, code []byte) {
s.code = code
s.data.CodeHash = codeHash[:]
+}
+
+// setCodeModified sets the code and hash and dirty markers.
+func (s *stateObject) setCodeModified(codeHash common.Hash, code []byte) {
+ s.setCode(codeHash, code)
s.dirtyCode = true
compiler.GenOrLoadOptimizedCode(codeHash, s.code)
+ s.nonFinalizedCode = true
}
func (s *stateObject) SetNonce(nonce uint64) {
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 2623132f54..3a35f928b0 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -28,6 +28,8 @@ import (
"github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/core/types/bal"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
@@ -128,6 +130,14 @@ type StateDB struct {
// The tx context and all occurred logs in the scope of transaction.
thash common.Hash
txIndex int
+ sender common.Address
+
+ // block access list modifications will be recorded with this index.
+ // 0 - state access before transaction execution
+ // 1 -> len(block txs) - state access of each transaction
+ // len(block txs) + 1 - state access after transaction execution.
+ balIndex int
+
logs map[common.Hash][]*types.Log
logSize uint
@@ -138,9 +148,6 @@ type StateDB struct {
accessList *accessList
accessEvents *AccessEvents
- // block level access list
- blockAccessList *types.BlockAccessListRecord
-
// Transient storage
transientStorage transientStorage
@@ -151,6 +158,10 @@ type StateDB struct {
// State witness if cross validation is needed
witness *stateless.Witness // TODO(Nathan): more define the relation with `noTrie`
+ stateAccesses bal.StateAccesses // accounts/storage accessed during transaction execution
+
+ blockAccessList *BALReader
+
// Measurements gathered during execution for debugging purposes
AccountReads time.Duration
AccountHashes time.Duration
@@ -170,6 +181,10 @@ type StateDB struct {
StorageDeleted atomic.Int64 // Number of storage slots deleted during the state transition
}
+func (s *StateDB) BlockAccessList() *BALReader {
+ return s.blockAccessList
+}
+
// New creates a new state from a given trie.
func New(root common.Hash, db Database) (*StateDB, error) {
reader, err := db.Reader(root)
@@ -193,8 +208,8 @@ func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, erro
preimages: make(map[common.Hash][]byte),
journal: newJournal(),
accessList: newAccessList(),
- blockAccessList: nil,
transientStorage: newTransientStorage(),
+ stateAccesses: make(bal.StateAccesses),
}
if db.TrieDB().IsVerkle() {
sdb.accessEvents = NewAccessEvents(db.PointCache())
@@ -202,13 +217,6 @@ func NewWithReader(root common.Hash, db Database, reader Reader) (*StateDB, erro
return sdb, nil
}
-func (s *StateDB) InitBlockAccessList() {
- if s.blockAccessList != nil {
- log.Warn("prepareBAL blockAccessList is not nil")
- }
- s.blockAccessList = &types.BlockAccessListRecord{Accounts: make(map[common.Address]types.AccountAccessListRecord)}
-}
-
func (s *StateDB) SetNeedBadSharedStorage(needBadSharedStorage bool) {
s.needBadSharedStorage = needBadSharedStorage
}
@@ -344,6 +352,38 @@ func (s *StateDB) AddRefund(gas uint64) {
s.refund += gas
}
+func (s *StateDB) SetBlockAccessList(al *BALReader) {
+ s.blockAccessList = al
+}
+
+// LoadModifiedPrestate instantiates the live object based on accounts
+// which appeared in the total state diff of a block, and were also preexisting.
+func (s *StateDB) LoadModifiedPrestate(addrs []common.Address) (res map[common.Address]*types.StateAccount) {
+ stateAccounts := new(sync.Map)
+ wg := new(sync.WaitGroup)
+ res = make(map[common.Address]*types.StateAccount)
+
+ for _, addr := range addrs {
+ wg.Add(1)
+ go func(addr common.Address) {
+ acct, err := s.reader.Account(addr)
+ if err == nil && acct != nil { // TODO: what should we do if the error is not nil?
+ stateAccounts.Store(addr, acct)
+ }
+ wg.Done()
+ }(addr)
+ }
+ wg.Wait()
+ stateAccounts.Range(func(addr any, val any) bool {
+ address := addr.(common.Address)
+ stateAccount := val.(*types.StateAccount)
+ res[address] = stateAccount
+ return true
+ })
+
+ return res
+}
+
// SubRefund removes gas from the refund counter.
// This method will panic if the refund counter goes below zero
func (s *StateDB) SubRefund(gas uint64) {
@@ -386,43 +426,6 @@ func (s *StateDB) GetNonce(addr common.Address) uint64 {
return 0
}
-func (s *StateDB) PreloadAccount(addr common.Address) {
- if s.Empty(addr) {
- return
- }
- s.GetCode(addr)
-}
-
-func (s *StateDB) PreloadStorage(addr common.Address, key common.Hash) {
- if s.Empty(addr) {
- return
- }
- s.GetState(addr, key)
-}
-func (s *StateDB) PreloadAccountTrie(addr common.Address) {
- if s.prefetcher == nil {
- return
- }
-
- addressesToPrefetch := []common.Address{addr}
- if err := s.prefetcher.prefetch(common.Hash{}, s.originalRoot, common.Address{}, addressesToPrefetch, nil, false); err != nil {
- log.Error("Failed to prefetch addresses", "addresses", len(addressesToPrefetch), "err", err)
- }
-}
-
-func (s *StateDB) PreloadStorageTrie(addr common.Address, key common.Hash) {
- if s.prefetcher == nil {
- return
- }
- obj := s.getStateObject(addr)
- if obj == nil {
- return
- }
- if err := s.prefetcher.prefetch(obj.addrHash, obj.origin.Root, obj.address, nil, []common.Hash{key}, true); err != nil {
- log.Error("Failed to prefetch storage slot", "addr", obj.address, "key", key, "err", err)
- }
-}
-
// GetStorageRoot retrieves the storage root from the given address or empty
// if object not found.
func (s *StateDB) GetStorageRoot(addr common.Address) common.Hash {
@@ -472,7 +475,6 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash {
func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
stateObject := s.getStateObject(addr)
if stateObject != nil {
- s.blockAccessList.AddStorage(addr, hash, uint32(s.txIndex), false)
return stateObject.GetState(hash)
}
return common.Hash{}
@@ -488,6 +490,15 @@ func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) commo
return common.Hash{}
}
+// GetStateAndCommittedState returns the current value and the original value.
+func (s *StateDB) GetStateAndCommittedState(addr common.Address, hash common.Hash) (common.Hash, common.Hash) {
+ stateObject := s.getStateObject(addr)
+ if stateObject != nil {
+ return stateObject.getState(hash)
+ }
+ return common.Hash{}, common.Hash{}
+}
+
// Database retrieves the low level database supporting the lower level trie ops.
func (s *StateDB) Database() Database {
return s.db
@@ -546,7 +557,7 @@ func (s *StateDB) SetNonce(addr common.Address, nonce uint64, reason tracing.Non
}
}
-func (s *StateDB) SetCode(addr common.Address, code []byte) (prev []byte) {
+func (s *StateDB) SetCode(addr common.Address, code []byte, reason tracing.CodeChangeReason) (prev []byte) {
stateObject := s.getOrNewStateObject(addr)
if stateObject != nil {
return stateObject.SetCode(crypto.Keccak256Hash(code), code)
@@ -555,7 +566,6 @@ func (s *StateDB) SetCode(addr common.Address, code []byte) (prev []byte) {
}
func (s *StateDB) SetState(addr common.Address, key, value common.Hash) common.Hash {
- s.blockAccessList.AddStorage(addr, key, uint32(s.txIndex), true)
if stateObject := s.getOrNewStateObject(addr); stateObject != nil {
return stateObject.SetState(key, value)
}
@@ -651,11 +661,11 @@ func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common
return s.transientStorage.Get(addr, key)
}
-//
// Setting, updating & deleting state object methods.
//
-
// updateStateObject writes the given object to the trie.
+//
+//nolint:unused
func (s *StateDB) updateStateObject(obj *stateObject) {
if s.db.NoTries() {
return
@@ -669,6 +679,30 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
}
}
+func (s *StateDB) updateStateObjects(objs []*stateObject) {
+ if s.db.NoTries() {
+ return
+ }
+ var addrs []common.Address
+ var accts []*types.StateAccount
+ var codeLens []int
+
+ for _, obj := range objs {
+ addrs = append(addrs, obj.Address())
+ accts = append(accts, &obj.data)
+ codeLens = append(codeLens, len(obj.code))
+ }
+
+ if err := s.trie.UpdateAccountBatch(addrs, accts, codeLens); err != nil {
+ s.setError(fmt.Errorf("updateStateObjects error: %v", err))
+ }
+
+ for _, obj := range objs {
+ if obj.dirtyCode {
+ s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
+ }
+ }
+}
// deleteStateObject removes the given object from the state trie.
func (s *StateDB) deleteStateObject(addr common.Address) {
@@ -685,7 +719,6 @@ func (s *StateDB) deleteStateObject(addr common.Address) {
// getStateObject retrieves a state object given by the address, returning nil if
// the object is not found or was deleted in this execution context.
func (s *StateDB) getStateObject(addr common.Address) *stateObject {
- s.blockAccessList.AddAccount(addr, uint32(s.txIndex))
// Prefer live objects if any is available
if obj := s.stateObjects[addr]; obj != nil {
return obj
@@ -694,6 +727,24 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
if _, ok := s.stateObjectsDestruct[addr]; ok {
return nil
}
+
+ // if we are executing against a block access list, construct the account
+ // state at the current tx index by applying the access-list diff on top
+ // of the prestate value for the account.
+ if s.blockAccessList != nil && s.balIndex != 0 && s.blockAccessList.isModified(addr) {
+ acct := s.blockAccessList.readAccount(s, addr, s.balIndex-1)
+ if acct != nil {
+ s.setStateObject(acct)
+ return acct
+ }
+ return nil
+
+ // if the acct was nil, it might be non-existent or was not explicitly requested for loading from the blockAcccessList object.
+ // try to load it from the snapshot.
+
+ // TODO: if the acct was non-existent because it was deleted, we should just return nil herre.
+ }
+
s.AccountLoaded++
start := time.Now()
@@ -732,6 +783,7 @@ func (s *StateDB) getOrNewStateObject(addr common.Address) *stateObject {
if obj == nil {
obj = s.createObject(addr)
}
+
return obj
}
@@ -798,14 +850,6 @@ func (s *StateDB) CopyDoPrefetch() *StateDB {
return s.copyInternal(true)
}
-func (s *StateDB) TransferBlockAccessList(prev *StateDB) {
- if prev == nil {
- return
- }
- s.blockAccessList = prev.blockAccessList
- prev.blockAccessList = nil
-}
-
// If doPrefetch is true, it tries to reuse the prefetcher, the copied StateDB will do active trie prefetch.
// otherwise, just do inactive copy trie prefetcher.
func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
@@ -823,10 +867,14 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
refund: s.refund,
thash: s.thash,
txIndex: s.txIndex,
+ balIndex: s.balIndex,
logs: make(map[common.Hash][]*types.Log, len(s.logs)),
logSize: s.logSize,
preimages: maps.Clone(s.preimages),
+ stateAccesses: make(bal.StateAccesses), // Don't deep copy state accesses
+ blockAccessList: s.blockAccessList,
+
// Do we need to copy the access list and transient storage?
// In practice: No. At the start of a transaction, these two lists are empty.
// In practice, we only ever copy state _between_ transactions/blocks, never
@@ -834,7 +882,6 @@ func (s *StateDB) copyInternal(doPrefetch bool) *StateDB {
// empty lists, so we do it anyway to not blow up if we ever decide copy them
// in the middle of a transaction.
accessList: s.accessList.Copy(),
- blockAccessList: nil,
transientStorage: s.transientStorage.Copy(),
journal: s.journal.copy(),
}
@@ -897,6 +944,9 @@ func (s *StateDB) GetRefund() uint64 {
// Finalise finalises the state by removing the destructed objects and clears
// the journal as well as the refunds. Finalise, however, will not push any updates
// into the tries just yet. Only IntermediateRoot or Commit will do that.
+//
+// If EnableStateDiffRecording has been called, it returns a state diff containing
+// the state which was mutated since the previous invocation of Finalise. Otherwise, nil.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
addressesToPrefetch := make([]common.Address, 0, len(s.journal.dirties))
for addr := range s.journal.dirties {
@@ -921,9 +971,9 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
}
} else {
obj.finalise()
+
s.markUpdate(addr)
- }
- // At this point, also ship the address off to the precacher. The precacher
+ } // At this point, also ship the address off to the precacher. The precacher
// will start loading tries, and when the change is eventually committed,
// the commit-phase will be a lot faster
addressesToPrefetch = append(addressesToPrefetch, addr) // Copy needed for closure
@@ -933,6 +983,7 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
log.Error("Failed to prefetch addresses", "addresses", len(addressesToPrefetch), "err", err)
}
}
+
// Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund()
}
@@ -1070,6 +1121,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
var (
usedAddrs []common.Address
deletedAddrs []common.Address
+ updatedObjs []*stateObject
)
for addr, op := range s.mutations {
if op.applied {
@@ -1080,11 +1132,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
if op.isDelete() {
deletedAddrs = append(deletedAddrs, addr)
} else {
- s.updateStateObject(s.stateObjects[addr])
+ updatedObjs = append(updatedObjs, s.stateObjects[addr])
s.AccountUpdated += 1
}
usedAddrs = append(usedAddrs, addr) // Copy needed for closure
}
+ if len(updatedObjs) > 0 {
+ s.updateStateObjects(updatedObjs)
+ }
for _, deletedAddr := range deletedAddrs {
s.deleteStateObject(deletedAddr)
s.AccountDeleted += 1
@@ -1103,7 +1158,20 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
}
hash := s.trie.Hash()
-
+ /*
+ it, err := s.trie.NodeIterator([]byte{})
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("state trie")
+ for it.Next(true) {
+ if it.Leaf() {
+ fmt.Printf("%x: %x\n", it.Path(), it.LeafBlob())
+ } else {
+ fmt.Printf("%x: %x\n", it.Path(), it.Hash())
+ }
+ }
+ */
// If witness building is enabled, gather the account trie witness
if s.witness != nil {
s.witness.AddState(s.trie.Witness())
@@ -1122,6 +1190,19 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
func (s *StateDB) SetTxContext(thash common.Hash, ti int) {
s.thash = thash
s.txIndex = ti
+ s.balIndex = ti + 1
+}
+
+// SetAccessListIndex sets the current index that state mutations will
+// be reported as in the BAL. It is only relevant if this StateDB instance
+// is being used in the BAL construction path.
+func (s *StateDB) SetAccessListIndex(idx int) {
+ s.balIndex = idx
+}
+
+// SetTxSender sets the sender of the currently-executing transaction.
+func (s *StateDB) SetTxSender(sender common.Address) {
+ s.sender = sender
}
// StateDB.Prepare is not called before processing a system transaction, call ClearAccessList instead.
@@ -1317,6 +1398,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateU
s.StopPrefetcher()
return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
}
+
// Finalize any pending changes and merge everything into the tries
s.IntermediateRoot(deleteEmptyObjects)
@@ -1687,52 +1769,7 @@ func (s *StateDB) AccessEvents() *AccessEvents {
return s.accessEvents
}
-func (s *StateDB) DumpAccessList(block *types.Block) {
- if s.blockAccessList == nil {
- return
- }
- accountCount := 0
- storageCount := 0
- dirtyStorageCount := 0
- for addr, account := range s.blockAccessList.Accounts {
- accountCount++
- log.Debug(" DumpAccessList Address", "address", addr.Hex(), "txIndex", account.TxIndex)
- for _, storageItem := range account.StorageItems {
- log.Debug(" DumpAccessList Storage Item", "key", storageItem.Key.Hex(), "txIndex", storageItem.TxIndex, "dirty", storageItem.Dirty)
- storageCount++
- if storageItem.Dirty {
- dirtyStorageCount++
- }
- }
- }
- log.Info("DumpAccessList", "blockNumber", block.NumberU64(), "GasUsed", block.GasUsed(),
- "accountCount", accountCount, "storageCount", storageCount, "dirtyStorageCount", dirtyStorageCount)
-}
-
-// GetEncodedBlockAccessList: convert BlockAccessListRecord to BlockAccessListEncode
-func (s *StateDB) GetEncodedBlockAccessList(block *types.Block) *types.BlockAccessListEncode {
- if s.blockAccessList == nil {
- return nil
- }
- // encode block access list to rlp to propagate with the block
- blockAccessList := types.BlockAccessListEncode{
- Version: 0,
- Number: block.NumberU64(),
- Hash: block.Hash(),
- SignData: make([]byte, 65),
- Accounts: make([]types.AccountAccessListEncode, 0),
- }
- for addr, account := range s.blockAccessList.Accounts {
- accountAccessList := types.AccountAccessListEncode{
- TxIndex: account.TxIndex,
- Address: addr,
- StorageItems: make([]types.StorageAccessItem, 0),
- }
- for _, storageItem := range account.StorageItems {
- accountAccessList.StorageItems = append(accountAccessList.StorageItems, storageItem)
- }
- blockAccessList.Accounts = append(blockAccessList.Accounts, accountAccessList)
- }
-
- return &blockAccessList
+func (s *StateDB) IsAddressInMutations(addr common.Address) bool {
+ _, ok := s.mutations[addr]
+ return ok
}
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index 18d67b00b9..73b4f5aff6 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -89,7 +89,7 @@ func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction
code := make([]byte, 16)
binary.BigEndian.PutUint64(code, uint64(a.args[0]))
binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
- s.SetCode(addr, code)
+ s.SetCode(addr, code, tracing.CodeChangeUnspecified)
},
args: make([]int64, 2),
},
diff --git a/core/state/statedb_hooked.go b/core/state/statedb_hooked.go
index 9597178a79..3ce3e14e44 100644
--- a/core/state/statedb_hooked.go
+++ b/core/state/statedb_hooked.go
@@ -54,26 +54,37 @@ func (s *hookedStateDB) CreateContract(addr common.Address) {
}
func (s *hookedStateDB) GetBalance(addr common.Address) *uint256.Int {
+ if s.hooks.OnAccountRead != nil {
+ s.hooks.OnAccountRead(addr)
+ }
return s.inner.GetBalance(addr)
}
-func (s *hookedStateDB) SetBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) {
- s.inner.SetBalance(addr, amount, reason)
-}
-
func (s *hookedStateDB) GetNonce(addr common.Address) uint64 {
+ if s.hooks.OnAccountRead != nil {
+ s.hooks.OnAccountRead(addr)
+ }
return s.inner.GetNonce(addr)
}
func (s *hookedStateDB) GetCodeHash(addr common.Address) common.Hash {
+ if s.hooks.OnAccountRead != nil {
+ s.hooks.OnAccountRead(addr)
+ }
return s.inner.GetCodeHash(addr)
}
func (s *hookedStateDB) GetCode(addr common.Address) []byte {
+ if s.hooks.OnAccountRead != nil {
+ s.hooks.OnAccountRead(addr)
+ }
return s.inner.GetCode(addr)
}
func (s *hookedStateDB) GetCodeSize(addr common.Address) int {
+ if s.hooks.OnAccountRead != nil {
+ s.hooks.OnAccountRead(addr)
+ }
return s.inner.GetCodeSize(addr)
}
@@ -89,15 +100,24 @@ func (s *hookedStateDB) GetRefund() uint64 {
return s.inner.GetRefund()
}
-func (s *hookedStateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash {
- return s.inner.GetCommittedState(addr, hash)
-}
-
func (s *hookedStateDB) GetState(addr common.Address, hash common.Hash) common.Hash {
+ if s.hooks.OnStorageRead != nil {
+ s.hooks.OnStorageRead(addr, hash)
+ }
return s.inner.GetState(addr, hash)
}
+func (s *hookedStateDB) GetStateAndCommittedState(addr common.Address, hash common.Hash) (common.Hash, common.Hash) {
+ if s.hooks.OnStorageRead != nil {
+ s.hooks.OnStorageRead(addr, hash)
+ }
+ return s.inner.GetStateAndCommittedState(addr, hash)
+}
+
func (s *hookedStateDB) GetStorageRoot(addr common.Address) common.Hash {
+ if s.hooks.OnAccountRead != nil {
+ s.hooks.OnAccountRead(addr)
+ }
return s.inner.GetStorageRoot(addr)
}
@@ -110,14 +130,23 @@ func (s *hookedStateDB) SetTransientState(addr common.Address, key, value common
}
func (s *hookedStateDB) HasSelfDestructed(addr common.Address) bool {
+ if s.hooks.OnAccountRead != nil {
+ s.hooks.OnAccountRead(addr)
+ }
return s.inner.HasSelfDestructed(addr)
}
func (s *hookedStateDB) Exist(addr common.Address) bool {
+ if s.hooks.OnAccountRead != nil {
+ s.hooks.OnAccountRead(addr)
+ }
return s.inner.Exist(addr)
}
func (s *hookedStateDB) Empty(addr common.Address) bool {
+ if s.hooks.OnAccountRead != nil {
+ s.hooks.OnAccountRead(addr)
+ }
return s.inner.Empty(addr)
}
@@ -137,10 +166,6 @@ func (s *hookedStateDB) AddSlotToAccessList(addr common.Address, slot common.Has
s.inner.AddSlotToAccessList(addr, slot)
}
-func (s *hookedStateDB) ClearAccessList() {
- s.inner.ClearAccessList()
-}
-
func (s *hookedStateDB) PointCache() *utils.PointCache {
return s.inner.PointCache()
}
@@ -205,14 +230,20 @@ func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64, reason tr
}
}
-func (s *hookedStateDB) SetCode(address common.Address, code []byte) []byte {
- prev := s.inner.SetCode(address, code)
- if s.hooks.OnCodeChange != nil {
+func (s *hookedStateDB) SetCode(address common.Address, code []byte, reason tracing.CodeChangeReason) []byte {
+ prev := s.inner.SetCode(address, code, reason)
+ if s.hooks.OnCodeChangeV2 != nil || s.hooks.OnCodeChange != nil {
prevHash := types.EmptyCodeHash
if len(prev) != 0 {
prevHash = crypto.Keccak256Hash(prev)
}
- s.hooks.OnCodeChange(address, prevHash, prev, crypto.Keccak256Hash(code), code)
+ codeHash := crypto.Keccak256Hash(code)
+
+ if s.hooks.OnCodeChangeV2 != nil {
+ s.hooks.OnCodeChangeV2(address, prevHash, prev, codeHash, code, reason)
+ } else if s.hooks.OnCodeChange != nil {
+ s.hooks.OnCodeChange(address, prevHash, prev, codeHash, code)
+ }
}
return prev
}
@@ -269,10 +300,6 @@ func (s *hookedStateDB) SelfDestruct6780(address common.Address) (uint256.Int, b
return prev, changed
}
-func (s *hookedStateDB) NoTries() bool {
- return s.inner.NoTries()
-}
-
func (s *hookedStateDB) AddLog(log *types.Log) {
// The inner will modify the log (add fields), so invoke that first
s.inner.AddLog(log)
@@ -281,10 +308,6 @@ func (s *hookedStateDB) AddLog(log *types.Log) {
}
}
-func (s *hookedStateDB) GetLogs(hash common.Hash, blockNumber uint64, blockHash common.Hash, blockTime uint64) []*types.Log {
- return s.inner.GetLogs(hash, blockNumber, blockHash, blockTime)
-}
-
func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) {
defer s.inner.Finalise(deleteEmptyObjects)
if s.hooks.OnBalanceChange == nil {
@@ -293,7 +316,6 @@ func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) {
for addr := range s.inner.journal.dirties {
obj := s.inner.stateObjects[addr]
if obj != nil && obj.selfDestructed {
- // If ether was sent to account post-selfdestruct it is burnt.
if bal := obj.Balance(); bal.Sign() != 0 {
s.hooks.OnBalanceChange(addr, bal.ToBig(), new(big.Int), tracing.BalanceDecreaseSelfdestructBurn)
}
@@ -301,6 +323,20 @@ func (s *hookedStateDB) Finalise(deleteEmptyObjects bool) {
}
}
-func (s *hookedStateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
- return s.inner.IntermediateRoot(deleteEmptyObjects)
+func (s *hookedStateDB) GetLogs(hash common.Hash, blockNumber uint64, blockHash common.Hash, blockTime uint64) []*types.Log {
+ return s.inner.GetLogs(hash, blockNumber, blockHash, blockTime)
+}
+
+func (s *hookedStateDB) IntermediateRoot(deleteEmpty bool) common.Hash {
+ return s.inner.IntermediateRoot(deleteEmpty)
+}
+func (s *hookedStateDB) Database() Database {
+ return s.inner.Database()
+}
+func (s *hookedStateDB) GetTrie() Trie {
+ return s.inner.GetTrie()
+}
+
+func (s *hookedStateDB) IsAddressInMutations(addr common.Address) bool {
+ return s.inner.IsAddressInMutations(addr)
}
diff --git a/core/state/statedb_hooked_test.go b/core/state/statedb_hooked_test.go
index f319b0e63c..bacb7baee1 100644
--- a/core/state/statedb_hooked_test.go
+++ b/core/state/statedb_hooked_test.go
@@ -114,7 +114,7 @@ func TestHooks(t *testing.T) {
sdb.AddBalance(common.Address{0xaa}, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
sdb.SubBalance(common.Address{0xaa}, uint256.NewInt(50), tracing.BalanceChangeTransfer)
sdb.SetNonce(common.Address{0xaa}, 1337, tracing.NonceChangeGenesis)
- sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37})
+ sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37}, tracing.CodeChangeUnspecified)
sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x11"))
sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x22"))
sdb.SetTransientState(common.Address{0xaa}, common.HexToHash("0x02"), common.HexToHash("0x01"))
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 21ee5523ca..10c555075e 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -65,7 +65,7 @@ func TestUpdateLeaks(t *testing.T) {
state.SetState(addr, common.BytesToHash([]byte{i, i, i}), common.BytesToHash([]byte{i, i, i, i}))
}
if i%3 == 0 {
- state.SetCode(addr, []byte{i, i, i, i, i})
+ state.SetCode(addr, []byte{i, i, i, i, i}, tracing.CodeChangeUnspecified)
}
}
@@ -101,7 +101,7 @@ func TestIntermediateLeaks(t *testing.T) {
state.SetState(addr, common.Hash{i, i, i, tweak}, common.Hash{i, i, i, i, tweak})
}
if i%3 == 0 {
- state.SetCode(addr, []byte{i, i, i, i, i, tweak})
+ state.SetCode(addr, []byte{i, i, i, i, i, tweak}, tracing.CodeChangeUnspecified)
}
}
@@ -374,7 +374,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
code := make([]byte, 16)
binary.BigEndian.PutUint64(code, uint64(a.args[0]))
binary.BigEndian.PutUint64(code[8:], uint64(a.args[1]))
- s.SetCode(addr, code)
+ s.SetCode(addr, code, tracing.CodeChangeUnspecified)
},
args: make([]int64, 2),
},
@@ -403,7 +403,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
// which would cause a difference in state when unrolling
// the journal. (CreateContact assumes created was false prior to
// invocation, and the journal rollback sets it to false).
- s.SetCode(addr, []byte{1})
+ s.SetCode(addr, []byte{1}, tracing.CodeChangeUnspecified)
}
},
},
@@ -731,7 +731,7 @@ func TestCopyCommitCopy(t *testing.T) {
sval := common.HexToHash("bbb")
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
+ state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata
state.SetState(addr, skey, sval) // Change the storage trie
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
@@ -804,7 +804,7 @@ func TestCopyCopyCommitCopy(t *testing.T) {
sval := common.HexToHash("bbb")
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
+ state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata
state.SetState(addr, skey, sval) // Change the storage trie
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
@@ -874,7 +874,7 @@ func TestCommitCopy(t *testing.T) {
sval1, sval2 := common.HexToHash("b1"), common.HexToHash("b2")
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
+ state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata
state.SetState(addr, skey1, sval1) // Change the storage trie
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
@@ -987,10 +987,10 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
addr := common.BytesToAddress([]byte("so"))
{
state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified)
- state.SetCode(addr, []byte{1, 2, 3})
+ state.SetCode(addr, []byte{1, 2, 3}, tracing.CodeChangeUnspecified)
a2 := common.BytesToAddress([]byte("another"))
state.SetBalance(a2, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
- state.SetCode(a2, []byte{1, 2, 4})
+ state.SetCode(a2, []byte{1, 2, 4}, tracing.CodeChangeUnspecified)
root, _ = state.Commit(0, false, false)
t.Logf("root: %x", root)
// force-flush
diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go
index 8d3cb1536f..90965dad26 100644
--- a/core/state/trie_prefetcher_test.go
+++ b/core/state/trie_prefetcher_test.go
@@ -40,7 +40,7 @@ func filledStateDB() *StateDB {
sval := common.HexToHash("bbb")
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
+ state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata
state.SetState(addr, skey, sval) // Change the storage trie
for i := 0; i < 100; i++ {
sk := common.BigToHash(big.NewInt(int64(i)))
@@ -143,7 +143,7 @@ func testVerklePrefetcher(t *testing.T) {
sval := testrand.Hash()
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
- state.SetCode(addr, []byte("hello")) // Change an external metadata
+ state.SetCode(addr, []byte("hello"), tracing.CodeChangeUnspecified) // Change an external metadata
state.SetState(addr, skey, sval) // Change the storage trie
root, _ := state.Commit(0, true, false)
diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go
index 68fb444188..033740b18b 100644
--- a/core/state_prefetcher.go
+++ b/core/state_prefetcher.go
@@ -25,14 +25,11 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"golang.org/x/sync/errgroup"
)
const prefetchMiningThread = 3
-const prefetchThreadBALSnapshot = 8
-const prefetchThreadBALTrie = 8
const checkInterval = 10
// statePrefetcher is a basic Prefetcher that executes transactions from a block
@@ -132,147 +129,6 @@ func (p *statePrefetcher) Prefetch(transactions types.Transactions, header *type
return
}
-func (p *statePrefetcher) PrefetchBALSnapshot(balPrefetch *types.BlockAccessListPrefetch, block *types.Block, txSize int, statedb *state.StateDB, interruptChan <-chan struct{}) {
- accChan := make(chan struct {
- txIndex uint32
- accAddr common.Address
- }, prefetchThreadBALSnapshot)
-
- keyChan := make(chan struct {
- txIndex uint32
- accAddr common.Address
- key common.Hash
- }, prefetchThreadBALSnapshot)
-
- // prefetch snapshot cache
- for i := 0; i < prefetchThreadBALSnapshot; i++ {
- go func() {
- newStatedb := statedb.CopyDoPrefetch()
- for {
- select {
- case accAddr := <-accChan:
- log.Debug("PrefetchBALSnapshot", "txIndex", accAddr.txIndex, "accAddr", accAddr.accAddr)
- newStatedb.PreloadAccount(accAddr.accAddr)
- case item := <-keyChan:
- log.Debug("PrefetchBALSnapshot", "txIndex", item.txIndex, "accAddr", item.accAddr, "key", item.key)
- newStatedb.PreloadStorage(item.accAddr, item.key)
- case <-interruptChan:
- return
- }
- }
- }()
- }
- for txIndex := 0; txIndex < txSize; txIndex++ {
- txAccessList := balPrefetch.AccessListItems[uint32(txIndex)]
- for accAddr, storageItems := range txAccessList.Accounts {
- select {
- case accChan <- struct {
- txIndex uint32
- accAddr common.Address
- }{
- txIndex: uint32(txIndex),
- accAddr: accAddr,
- }:
- case <-interruptChan:
- return
- }
- for _, storageItem := range storageItems {
- select {
- case keyChan <- struct {
- txIndex uint32
- accAddr common.Address
- key common.Hash
- }{
- txIndex: uint32(txIndex),
- accAddr: accAddr,
- key: storageItem.Key,
- }:
- case <-interruptChan:
- return
- }
- }
- }
- }
- log.Debug("PrefetchBALSnapshot dispatch finished")
-}
-
-func (p *statePrefetcher) PrefetchBALTrie(balPrefetch *types.BlockAccessListPrefetch, block *types.Block, statedb *state.StateDB, interruptChan <-chan struct{}) {
- accItemsChan := make(chan struct {
- txIndex uint32
- accAddr common.Address
- items []types.StorageAccessItemPrefetch
- }, prefetchThreadBALTrie)
-
- for i := 0; i < prefetchThreadBALTrie; i++ {
- go func() {
- newStatedb := statedb.CopyDoPrefetch()
- for {
- select {
- case accItem := <-accItemsChan:
- newStatedb.PreloadAccountTrie(accItem.accAddr)
- log.Debug("PrefetchBALTrie", "txIndex", accItem.txIndex, "accAddr", accItem.accAddr)
- for _, storageItem := range accItem.items {
- if storageItem.Dirty {
- log.Debug("PrefetchBALTrie", "txIndex", accItem.txIndex, "accAddr", accItem.accAddr, "storageItem", storageItem.Key, "dirty", storageItem.Dirty)
- statedb.PreloadStorageTrie(accItem.accAddr, storageItem.Key)
- }
- }
- case <-interruptChan:
- return
- }
- }
- }()
- }
-
- for txIndex, txAccessList := range balPrefetch.AccessListItems {
- for accAddr, storageItems := range txAccessList.Accounts {
- select {
- case accItemsChan <- struct {
- txIndex uint32
- accAddr common.Address
- items []types.StorageAccessItemPrefetch
- }{
- txIndex: txIndex,
- accAddr: accAddr,
- items: storageItems,
- }:
- case <-interruptChan:
- log.Warn("PrefetchBALTrie interrupted")
- return
- }
- }
- }
- log.Debug("PrefetchBALTrie dispatch finished")
-}
-
-func (p *statePrefetcher) PrefetchBAL(block *types.Block, statedb *state.StateDB, interruptChan <-chan struct{}) {
- if block.BAL() == nil {
- return
- }
- transactions := block.Transactions()
- blockAccessList := block.BAL()
-
- // get index sorted block access list, each transaction has a list of accounts, each account has a list of storage items
- // txIndex 0:
- // account1: storage1_1, storage1_2, storage1_3
- // account2: storage2_1, storage2_2, storage2_3
- // txIndex 1:
- // account3: storage3_1, storage3_2, storage3_3
- // ...
- balPrefetch := types.BlockAccessListPrefetch{
- AccessListItems: make(map[uint32]types.TxAccessListPrefetch),
- }
- for _, account := range blockAccessList.Accounts {
- balPrefetch.Update(&account)
- }
-
- // prefetch snapshot cache
- go p.PrefetchBALSnapshot(&balPrefetch, block, len(transactions), statedb, interruptChan)
-
- // prefetch MPT trie node cache
- go p.PrefetchBALTrie(&balPrefetch, block, statedb, interruptChan)
-}
-
// PrefetchMining processes the state changes according to the Ethereum rules by running
// the transaction messages using the statedb, but any changes are discarded. The
// only goal is to warm the state caches. Only used for mining stage.
diff --git a/core/state_processor.go b/core/state_processor.go
index 9d3240036d..22e7764f25 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -77,13 +77,6 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
misc.ApplyDAOHardFork(statedb)
}
- lastBlock := p.chain.GetHeaderByHash(block.ParentHash())
- if lastBlock == nil {
- return nil, errors.New("could not get parent block")
- }
- // Handle upgrade built-in system contract code
- systemcontracts.TryUpdateBuildInSystemContract(p.config, blockNumber, lastBlock.Time, block.Time(), statedb, true)
-
var (
context vm.BlockContext
signer = types.MakeSigner(p.config, header.Number, header.Time)
@@ -92,13 +85,20 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
)
// Apply pre-execution system calls.
- var tracingStateDB = vm.StateDB(statedb)
+ var tracingStateDB vm.StateDB = statedb
if hooks := cfg.Tracer; hooks != nil {
tracingStateDB = state.NewHookedState(statedb, hooks)
}
context = NewEVMBlockContext(header, p.chain, nil)
evm := vm.NewEVM(context, tracingStateDB, p.config, cfg)
+ lastBlock := p.chain.GetHeaderByHash(block.ParentHash())
+ if lastBlock == nil {
+ return nil, errors.New("could not get parent block")
+ }
+ // Handle upgrade built-in system contract code
+ systemcontracts.TryUpdateBuildInSystemContract(p.config, blockNumber, lastBlock.Time, block.Time(), tracingStateDB, true)
+
if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
ProcessBeaconBlockRoot(*beaconRoot, evm)
}
@@ -116,6 +116,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
// usually do have two tx, one for validator set contract, another for system reward contract.
systemTxs := make([]*types.Transaction, 0, 2)
+ if hooks := cfg.Tracer; hooks != nil && hooks.OnPreTxExecutionDone != nil {
+ hooks.OnPreTxExecutionDone()
+ }
+
for i, tx := range block.Transactions() {
if isPoSA {
if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil {
@@ -149,6 +153,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
commonTxs = append(commonTxs, tx)
receipts = append(receipts, receipt)
}
+
bloomProcessors.Close()
// Read requests if Prague is enabled.
@@ -178,10 +183,14 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
if err != nil {
return nil, err
}
+ if hooks := cfg.Tracer; hooks != nil && hooks.OnBlockFinalization != nil {
+ hooks.OnBlockFinalization()
+ }
+
for _, receipt := range receipts {
allLogs = append(allLogs, receipt.Logs...)
}
- statedb.DumpAccessList(block)
+
return &ProcessResult{
Receipts: receipts,
Requests: requests,
@@ -219,6 +228,7 @@ func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB,
if err != nil {
return nil, err
}
+
// Update the state with pending changes.
var root []byte
if evm.ChainConfig().IsByzantium(blockNumber) {
@@ -233,7 +243,6 @@ func ApplyTransactionWithEVM(msg *Message, gp *GasPool, statedb *state.StateDB,
if statedb.Database().TrieDB().IsVerkle() {
statedb.AccessEvents().Merge(evm.AccessEvents)
}
-
return MakeReceipt(evm, result, statedb, blockNumber, blockHash, blockTime, tx, *usedGas, root, receiptProcessors...), nil
}
@@ -288,11 +297,6 @@ func ApplyTransaction(evm *vm.EVM, gp *GasPool, statedb *state.StateDB, header *
// contract. This method is exported to be used in tests.
func ProcessBeaconBlockRoot(beaconRoot common.Hash, evm *vm.EVM) {
// Return immediately if beaconRoot equals the zero hash when using the Parlia engine.
- if beaconRoot == (common.Hash{}) {
- if chainConfig := evm.ChainConfig(); chainConfig != nil && chainConfig.Parlia != nil {
- return
- }
- }
if tracer := evm.Config.Tracer; tracer != nil {
onSystemCallStart(tracer, evm.GetVMContext())
if tracer.OnSystemCallEnd != nil {
@@ -374,13 +378,13 @@ func processRequestsSystemCall(requests *[][]byte, evm *vm.EVM, requestType byte
evm.SetTxContext(NewEVMTxContext(msg))
evm.StateDB.AddAddressToAccessList(addr)
ret, _, err := evm.Call(msg.From, *msg.To, msg.Data, 30_000_000, common.U2560)
- evm.StateDB.Finalise(true)
if err != nil {
return fmt.Errorf("system call failed to execute: %v", err)
}
if len(ret) == 0 {
return nil // skip empty output
}
+ evm.StateDB.Finalise(true)
// Append prefixed requestsData to the requests list.
requestsData := make([]byte, len(ret)+1)
requestsData[0] = requestType
diff --git a/core/state_transition.go b/core/state_transition.go
index 8108e5c178..289d272a10 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -630,12 +630,12 @@ func (st *stateTransition) applyAuthorization(auth *types.SetCodeAuthorization)
st.state.SetNonce(authority, auth.Nonce+1, tracing.NonceChangeAuthorization)
if auth.Address == (common.Address{}) {
// Delegation to zero address means clear.
- st.state.SetCode(authority, nil)
+ st.state.SetCode(authority, nil, tracing.CodeChangeUnspecified)
return nil
}
// Otherwise install delegation to auth.Address.
- st.state.SetCode(authority, types.AddressToDelegation(auth.Address))
+ st.state.SetCode(authority, types.AddressToDelegation(auth.Address), tracing.CodeChangeUnspecified)
return nil
}
diff --git a/core/systemcontracts/upgrade.go b/core/systemcontracts/upgrade.go
index 8920379cf3..32c24df3b1 100644
--- a/core/systemcontracts/upgrade.go
+++ b/core/systemcontracts/upgrade.go
@@ -1064,7 +1064,7 @@ func TryUpdateBuildInSystemContract(config *params.ChainConfig, blockNumber *big
}
// HistoryStorageAddress is a special system contract in bsc, which can't be upgraded
if config.IsOnPrague(blockNumber, lastBlockTime, blockTime) {
- statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode)
+ statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode, tracing.CodeChangeUnspecified)
statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeNewContract)
log.Info("Set code for HistoryStorageAddress", "blockNumber", blockNumber.Int64(), "blockTime", blockTime)
}
@@ -1200,7 +1200,7 @@ func applySystemContractUpgrade(upgrade *Upgrade, blockNumber *big.Int, statedb
if err != nil {
panic(fmt.Errorf("failed to decode new contract code: %s", err.Error()))
}
- statedb.SetCode(cfg.ContractAddr, newContractCode)
+ statedb.SetCode(cfg.ContractAddr, newContractCode, tracing.CodeChangeUnspecified)
if cfg.AfterUpgrade != nil {
err := cfg.AfterUpgrade(blockNumber, cfg.ContractAddr, statedb)
diff --git a/core/tracing/gen_code_change_reason_stringer.go b/core/tracing/gen_code_change_reason_stringer.go
new file mode 100644
index 0000000000..9372954063
--- /dev/null
+++ b/core/tracing/gen_code_change_reason_stringer.go
@@ -0,0 +1,29 @@
+// Code generated by "stringer -type=CodeChangeReason -trimprefix=CodeChange -output gen_code_change_reason_stringer.go"; DO NOT EDIT.
+
+package tracing
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[CodeChangeUnspecified-0]
+ _ = x[CodeChangeContractCreation-1]
+ _ = x[CodeChangeGenesis-2]
+ _ = x[CodeChangeAuthorization-3]
+ _ = x[CodeChangeAuthorizationClear-4]
+ _ = x[CodeChangeSelfDestruct-5]
+ _ = x[CodeChangeRevert-6]
+}
+
+const _CodeChangeReason_name = "UnspecifiedContractCreationGenesisAuthorizationAuthorizationClearSelfDestructRevert"
+
+var _CodeChangeReason_index = [...]uint8{0, 11, 27, 34, 47, 65, 77, 83}
+
+func (i CodeChangeReason) String() string {
+ if i >= CodeChangeReason(len(_CodeChangeReason_index)-1) {
+ return "CodeChangeReason(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _CodeChangeReason_name[_CodeChangeReason_index[i]:_CodeChangeReason_index[i+1]]
+}
diff --git a/core/tracing/gen_nonce_change_reason_stringer.go b/core/tracing/gen_nonce_change_reason_stringer.go
index f775c1f3a6..cd19200db8 100644
--- a/core/tracing/gen_nonce_change_reason_stringer.go
+++ b/core/tracing/gen_nonce_change_reason_stringer.go
@@ -15,11 +15,12 @@ func _() {
_ = x[NonceChangeNewContract-4]
_ = x[NonceChangeAuthorization-5]
_ = x[NonceChangeRevert-6]
+ _ = x[NonceChangeSelfdestruct-7]
}
-const _NonceChangeReason_name = "UnspecifiedGenesisEoACallContractCreatorNewContractAuthorizationRevert"
+const _NonceChangeReason_name = "UnspecifiedGenesisEoACallContractCreatorNewContractAuthorizationRevertSelfdestruct"
-var _NonceChangeReason_index = [...]uint8{0, 11, 18, 25, 40, 51, 64, 70}
+var _NonceChangeReason_index = [...]uint8{0, 11, 18, 25, 40, 51, 64, 70, 82}
func (i NonceChangeReason) String() string {
if i >= NonceChangeReason(len(_NonceChangeReason_index)-1) {
diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go
index 0590a010f0..2b8d455a65 100644
--- a/core/tracing/hooks.go
+++ b/core/tracing/hooks.go
@@ -214,12 +214,23 @@ type (
// CodeChangeHook is called when the code of an account changes.
CodeChangeHook = func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte)
+ // CodeChangeHookV2 is called when the code of an account changes.
+ CodeChangeHookV2 = func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte, reason CodeChangeReason)
+
// StorageChangeHook is called when the storage of an account changes.
StorageChangeHook = func(addr common.Address, slot common.Hash, prev, new common.Hash)
// LogHook is called when a log is emitted.
LogHook = func(log *types.Log)
+ SelfDestructHook = func(address common.Address)
+
+ // AccountReadHook is called when the account is accessed.
+ AccountReadHook = func(addr common.Address)
+
+ // StorageReadHook is called when the storage slot is accessed.
+ StorageReadHook = func(addr common.Address, slot common.Hash)
+
// BlockHashReadHook is called when EVM reads the blockhash of a block.
BlockHashReadHook = func(blockNumber uint64, hash common.Hash)
)
@@ -248,15 +259,25 @@ type Hooks struct {
OnSystemTxEnd OnSystemTxEndHook
OnSystemTxFixIntrinsicGas OnSystemTxFixIntrinsicGasHook
+ OnPreTxExecutionDone func() // called after pre-tx system contracts are invoked
+ OnBlockFinalization func() // called after post-tx system contracts and consensus finalization are invoked
+
// State events
OnBalanceChange BalanceChangeHook
OnNonceChange NonceChangeHook
OnNonceChangeV2 NonceChangeHookV2
OnCodeChange CodeChangeHook
+ OnCodeChangeV2 CodeChangeHookV2
OnStorageChange StorageChangeHook
OnLog LogHook
// Block hash read
OnBlockHashRead BlockHashReadHook
+
+ OnSelfDestructChange SelfDestructHook
+
+ // State access events
+ OnAccountRead AccountReadHook
+ OnStorageRead StorageReadHook
}
// BalanceChangeReason is used to indicate the reason for a balance change, useful
@@ -422,4 +443,35 @@ const (
// NonceChangeRevert is emitted when the nonce is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
NonceChangeRevert NonceChangeReason = 6
+
+ // NonceChangeSelfdestruct is emitted when the nonce is reset to zero due to a self-destruct
+ NonceChangeSelfdestruct NonceChangeReason = 7
+)
+
+// CodeChangeReason is used to indicate the reason for a code change.
+type CodeChangeReason byte
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=CodeChangeReason -trimprefix=CodeChange -output gen_code_change_reason_stringer.go
+
+const (
+ CodeChangeUnspecified CodeChangeReason = 0
+
+ // CodeChangeContractCreation is when a new contract is deployed via CREATE/CREATE2 operations.
+ CodeChangeContractCreation CodeChangeReason = 1
+
+ // CodeChangeGenesis is when contract code is set during blockchain genesis or initial setup.
+ CodeChangeGenesis CodeChangeReason = 2
+
+ // CodeChangeAuthorization is when code is set via EIP-7702 Set Code Authorization.
+ CodeChangeAuthorization CodeChangeReason = 3
+
+ // CodeChangeAuthorizationClear is when EIP-7702 delegation is cleared by setting to zero address.
+ CodeChangeAuthorizationClear CodeChangeReason = 4
+
+ // CodeChangeSelfDestruct is when contract code is cleared due to self-destruct.
+ CodeChangeSelfDestruct CodeChangeReason = 5
+
+ // CodeChangeRevert is emitted when the code is reverted back to a previous value due to call failure.
+ // It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
+ CodeChangeRevert CodeChangeReason = 6
)
diff --git a/core/tracing/journal.go b/core/tracing/journal.go
index 8937d4c5ae..04a22fc41b 100644
--- a/core/tracing/journal.go
+++ b/core/tracing/journal.go
@@ -95,6 +95,10 @@ func (j *journal) snapshot() {
// revert reverts all state changes up to the last tracked revision.
func (j *journal) revert(hooks *Hooks) {
+ // Guard against empty revisions (can happen with concurrent access)
+ if len(j.revisions) == 0 {
+ return
+ }
// Replay the journal entries above the last revision to undo changes,
// then remove the reverted changes from the journal.
rev := j.revisions[len(j.revisions)-1]
@@ -108,7 +112,10 @@ func (j *journal) revert(hooks *Hooks) {
// popRevision removes an item from the revision stack. This basically forgets about
// the last call to snapshot() and moves to the one prior.
func (j *journal) popRevision() {
- j.revisions = j.revisions[:len(j.revisions)-1]
+ // Guard against empty revisions (can happen with concurrent access)
+ if len(j.revisions) > 0 {
+ j.revisions = j.revisions[:len(j.revisions)-1]
+ }
}
// OnTxEnd resets the journal since each transaction has its own EVM call stack.
diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go
index 22193df477..6600cbc0b8 100644
--- a/core/txpool/legacypool/legacypool_test.go
+++ b/core/txpool/legacypool/legacypool_test.go
@@ -2393,8 +2393,8 @@ func TestSetCodeTransactions(t *testing.T) {
pending: 1,
run: func(name string) {
aa := common.Address{0xaa, 0xaa}
- statedb.SetCode(addrA, append(types.DelegationPrefix, aa.Bytes()...))
- statedb.SetCode(aa, []byte{byte(vm.ADDRESS), byte(vm.PUSH0), byte(vm.SSTORE)})
+ statedb.SetCode(addrA, append(types.DelegationPrefix, aa.Bytes()...), tracing.CodeChangeUnspecified)
+ statedb.SetCode(aa, []byte{byte(vm.ADDRESS), byte(vm.PUSH0), byte(vm.SSTORE)}, tracing.CodeChangeUnspecified)
// Send gapped transaction, it should be rejected.
if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), keyA)); !errors.Is(err, ErrOutOfOrderTxFromDelegated) {
@@ -2418,7 +2418,7 @@ func TestSetCodeTransactions(t *testing.T) {
}
// Reset the delegation, avoid leaking state into the other tests
- statedb.SetCode(addrA, nil)
+ statedb.SetCode(addrA, nil, tracing.CodeChangeUnspecified)
},
},
{
@@ -2684,7 +2684,7 @@ func TestSetCodeTransactionsReorg(t *testing.T) {
}
// Simulate the chain moving
blockchain.statedb.SetNonce(addrA, 1, tracing.NonceChangeAuthorization)
- blockchain.statedb.SetCode(addrA, types.AddressToDelegation(auth.Address))
+ blockchain.statedb.SetCode(addrA, types.AddressToDelegation(auth.Address), tracing.CodeChangeUnspecified)
<-pool.requestReset(nil, nil)
// Set an authorization for 0x00
auth, _ = types.SignSetCode(keyA, types.SetCodeAuthorization{
@@ -2702,7 +2702,7 @@ func TestSetCodeTransactionsReorg(t *testing.T) {
}
// Simulate the chain moving
blockchain.statedb.SetNonce(addrA, 2, tracing.NonceChangeAuthorization)
- blockchain.statedb.SetCode(addrA, nil)
+ blockchain.statedb.SetCode(addrA, nil, tracing.CodeChangeUnspecified)
<-pool.requestReset(nil, nil)
// Now send two transactions from addrA
if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1000), keyA)); err != nil {
diff --git a/core/types.go b/core/types.go
index e27395f5c3..f70f0e789c 100644
--- a/core/types.go
+++ b/core/types.go
@@ -49,8 +49,6 @@ type Prefetcher interface {
Prefetch(transactions types.Transactions, header *types.Header, gasLimit uint64, statedb *state.StateDB, cfg vm.Config, interrupt *atomic.Bool)
// PrefetchMining used for pre-caching transaction signatures and state trie nodes. Only used for mining stage.
PrefetchMining(txs TransactionsByPriceAndNonce, header *types.Header, gasLimit uint64, statedb *state.StateDB, cfg vm.Config, interruptCh <-chan struct{}, txCurr **types.Transaction)
- // prefetch based on block access list
- PrefetchBAL(block *types.Block, statedb *state.StateDB, interruptChan <-chan struct{})
}
// Processor is an interface for processing blocks using a given initial state.
@@ -67,4 +65,5 @@ type ProcessResult struct {
Requests [][]byte
Logs []*types.Log
GasUsed uint64
+ Error error
}
diff --git a/core/types/bal/bal.go b/core/types/bal/bal.go
new file mode 100644
index 0000000000..d76c98655e
--- /dev/null
+++ b/core/types/bal/bal.go
@@ -0,0 +1,704 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package bal
+
+import (
+ "bytes"
+ "encoding/json"
+ "maps"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/holiman/uint256"
+)
+
+// idxAccessListBuilder is responsible for producing the state accesses and
+// reads recorded within the scope of a single index in the access list.
+type idxAccessListBuilder struct {
+ // stores the previous values of any account data that was modified in the
+ // current index.
+ prestates map[common.Address]*accountIdxPrestate
+
+ // a stack which maintains a set of state mutations/reads for each EVM
+ // execution frame. Entering a frame appends an intermediate access list
+ // and terminating a frame merges the accesses/modifications into the
+ // intermediate access list of the calling frame.
+ accessesStack []map[common.Address]*constructionAccountAccess
+}
+
+func newAccessListBuilder() *idxAccessListBuilder {
+ return &idxAccessListBuilder{
+ make(map[common.Address]*accountIdxPrestate),
+ []map[common.Address]*constructionAccountAccess{
+ make(map[common.Address]*constructionAccountAccess),
+ },
+ }
+}
+
+func (c *idxAccessListBuilder) storageRead(address common.Address, key common.Hash) {
+ if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
+ c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
+ }
+ acctAccesses := c.accessesStack[len(c.accessesStack)-1][address]
+ acctAccesses.StorageRead(key)
+}
+
+func (c *idxAccessListBuilder) accountRead(address common.Address) {
+ if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
+ c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
+ }
+}
+
+func (c *idxAccessListBuilder) storageWrite(address common.Address, key, prevVal, newVal common.Hash) {
+ if _, ok := c.prestates[address]; !ok {
+ c.prestates[address] = &accountIdxPrestate{}
+ }
+ if c.prestates[address].storage == nil {
+ c.prestates[address].storage = make(map[common.Hash]common.Hash)
+ }
+ if _, ok := c.prestates[address].storage[key]; !ok {
+ c.prestates[address].storage[key] = prevVal
+ }
+
+ if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
+ c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
+ }
+ acctAccesses := c.accessesStack[len(c.accessesStack)-1][address]
+ acctAccesses.StorageWrite(key, prevVal, newVal)
+}
+
+func (c *idxAccessListBuilder) balanceChange(address common.Address, prev, cur *uint256.Int) {
+ if _, ok := c.prestates[address]; !ok {
+ c.prestates[address] = &accountIdxPrestate{}
+ }
+ if c.prestates[address].balance == nil {
+ c.prestates[address].balance = prev
+ }
+ if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
+ c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
+ }
+ acctAccesses := c.accessesStack[len(c.accessesStack)-1][address]
+ acctAccesses.BalanceChange(cur)
+}
+
+func (c *idxAccessListBuilder) codeChange(address common.Address, prev, cur []byte) {
+ // auth unset and selfdestruct pass code change as 'nil'
+ // however, internally in the access list accumulation of state changes,
+ // a nil field on an account means that it was never modified in the block.
+ if cur == nil {
+ cur = []byte{}
+ }
+
+ if _, ok := c.prestates[address]; !ok {
+ c.prestates[address] = &accountIdxPrestate{}
+ }
+ if c.prestates[address].code == nil {
+ c.prestates[address].code = prev
+ }
+ if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
+ c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
+ }
+ acctAccesses := c.accessesStack[len(c.accessesStack)-1][address]
+
+ acctAccesses.CodeChange(cur)
+}
+
+// selfDestruct is invoked when an account which has been created and invoked
+// SENDALL in the same transaction is removed as part of transaction finalization.
+//
+// Any storage accesses/modifications performed at the contract during execution
+// are retained in the block access list as state reads.
+func (c *idxAccessListBuilder) selfDestruct(address common.Address) {
+ // convert all the account storage writes to reads, preserve the existing reads
+ access := c.accessesStack[len(c.accessesStack)-1][address]
+ for key, _ := range access.storageMutations {
+ if access.storageReads == nil {
+ access.storageReads = make(map[common.Hash]struct{})
+ }
+ access.storageReads[key] = struct{}{}
+ }
+
+ access.storageMutations = nil
+}
+
+func (c *idxAccessListBuilder) nonceChange(address common.Address, prev, cur uint64) {
+ if _, ok := c.prestates[address]; !ok {
+ c.prestates[address] = &accountIdxPrestate{}
+ }
+ if c.prestates[address].nonce == nil {
+ c.prestates[address].nonce = &prev
+ }
+ if _, ok := c.accessesStack[len(c.accessesStack)-1][address]; !ok {
+ c.accessesStack[len(c.accessesStack)-1][address] = &constructionAccountAccess{}
+ }
+ acctAccesses := c.accessesStack[len(c.accessesStack)-1][address]
+ acctAccesses.NonceChange(cur)
+}
+
+// enterScope is called after a new EVM frame has been entered.
+func (c *idxAccessListBuilder) enterScope() {
+ c.accessesStack = append(c.accessesStack, make(map[common.Address]*constructionAccountAccess))
+}
+
+// exitScope is called after an EVM call scope terminates. If the call scope
+// terminates with an error:
+// * the scope's state accesses are added to the calling scope's access list
+// * mutated accounts/storage are added into the calling scope's access list as state accesses
+// * the state mutations tracked in the parent scope are un-modified
+func (c *idxAccessListBuilder) exitScope(evmErr bool) {
+ // all storage writes in the child scope are converted into reads
+ // if there were no storage writes, the account is reported in the BAL as a read (if it wasn't already in the BAL and/or mutated previously)
+ childAccessList := c.accessesStack[len(c.accessesStack)-1]
+ parentAccessList := c.accessesStack[len(c.accessesStack)-2]
+
+ for addr, childAccess := range childAccessList {
+ if _, ok := parentAccessList[addr]; ok {
+ } else {
+ parentAccessList[addr] = &constructionAccountAccess{}
+ }
+ if evmErr {
+ parentAccessList[addr].MergeReads(childAccess)
+ } else {
+ parentAccessList[addr].Merge(childAccess)
+ }
+ }
+
+ c.accessesStack = c.accessesStack[:len(c.accessesStack)-1]
+}
+
+// finalise returns the net state mutations at the access list index as well as
+// state which was accessed. The idxAccessListBuilder instance should be discarded
+// after calling finalise.
+func (c *idxAccessListBuilder) finalise() (*StateDiff, StateAccesses) {
+ diff := &StateDiff{Mutations: make(map[common.Address]*AccountMutations)}
+ stateAccesses := make(StateAccesses)
+
+ for addr, access := range c.accessesStack[0] {
+ // remove any mutations from the access list with no net difference vs the tx prestate value
+ if access.nonce != nil && *c.prestates[addr].nonce == *access.nonce {
+ access.nonce = nil
+ }
+ if access.balance != nil && c.prestates[addr].balance.Eq(access.balance) {
+ access.balance = nil
+ }
+
+ if access.code != nil && bytes.Equal(access.code, c.prestates[addr].code) {
+ access.code = nil
+ }
+ if access.storageMutations != nil {
+ for key, val := range access.storageMutations {
+ if c.prestates[addr].storage[key] == val {
+ delete(access.storageMutations, key)
+ access.storageReads[key] = struct{}{}
+ }
+ }
+ if len(access.storageMutations) == 0 {
+ access.storageMutations = nil
+ }
+ }
+
+ // if the account has no net mutations against the tx prestate, only include
+ // it in the state read set
+ if len(access.code) == 0 && access.nonce == nil && access.balance == nil && len(access.storageMutations) == 0 {
+ stateAccesses[addr] = make(map[common.Hash]struct{})
+ if access.storageReads != nil {
+ stateAccesses[addr] = access.storageReads
+ }
+ continue
+ }
+
+ stateAccesses[addr] = access.storageReads
+ diff.Mutations[addr] = &AccountMutations{
+ Balance: access.balance,
+ Nonce: access.nonce,
+ Code: access.code,
+ StorageWrites: access.storageMutations,
+ }
+ }
+
+ return diff, stateAccesses
+}
+
+// FinaliseIdxChanges records all pending state mutations/accesses in the
+// access list at the given index. The set of pending state mutations/accesse are
+// then emptied.
+func (c *AccessListBuilder) FinaliseIdxChanges(idx uint16) {
+ pendingDiff, pendingAccesses := c.idxBuilder.finalise()
+ c.idxBuilder = newAccessListBuilder()
+
+ // if any of the newly-written storage slots were previously
+ // accessed, they must be removed from the accessed state set.
+ for addr, pendingAcctDiff := range pendingDiff.Mutations {
+ finalizedAcctChanges, ok := c.FinalizedAccesses[addr]
+ if !ok {
+ finalizedAcctChanges = &ConstructionAccountAccesses{}
+ c.FinalizedAccesses[addr] = finalizedAcctChanges
+ }
+
+ if pendingAcctDiff.Nonce != nil {
+ if finalizedAcctChanges.NonceChanges == nil {
+ finalizedAcctChanges.NonceChanges = make(map[uint16]uint64)
+ }
+ finalizedAcctChanges.NonceChanges[idx] = *pendingAcctDiff.Nonce
+ }
+ if pendingAcctDiff.Balance != nil {
+ if finalizedAcctChanges.BalanceChanges == nil {
+ finalizedAcctChanges.BalanceChanges = make(map[uint16]*uint256.Int)
+ }
+ finalizedAcctChanges.BalanceChanges[idx] = pendingAcctDiff.Balance
+ }
+ if pendingAcctDiff.Code != nil {
+ if finalizedAcctChanges.CodeChanges == nil {
+ finalizedAcctChanges.CodeChanges = make(map[uint16]CodeChange)
+ }
+ finalizedAcctChanges.CodeChanges[idx] = CodeChange{idx, pendingAcctDiff.Code}
+ }
+ if pendingAcctDiff.StorageWrites != nil {
+ if finalizedAcctChanges.StorageWrites == nil {
+ finalizedAcctChanges.StorageWrites = make(map[common.Hash]map[uint16]common.Hash)
+ }
+ for key, val := range pendingAcctDiff.StorageWrites {
+ if _, ok := finalizedAcctChanges.StorageWrites[key]; !ok {
+ finalizedAcctChanges.StorageWrites[key] = make(map[uint16]common.Hash)
+ }
+ finalizedAcctChanges.StorageWrites[key][idx] = val
+
+ // TODO: commenting this 'if' results in no test failures.
+ // double-check that this edge-case was fixed by a future
+ // release of the eest BAL tests.
+ if _, ok := finalizedAcctChanges.StorageReads[key]; ok {
+ delete(finalizedAcctChanges.StorageReads, key)
+ }
+ }
+ }
+ }
+ // record pending accesses in the BAL access set unless they were
+ // already written in a previous index
+ for addr, pendingAccountAccesses := range pendingAccesses {
+ finalizedAcctAccesses, ok := c.FinalizedAccesses[addr]
+ if !ok {
+ finalizedAcctAccesses = &ConstructionAccountAccesses{}
+ c.FinalizedAccesses[addr] = finalizedAcctAccesses
+ }
+
+ for key := range pendingAccountAccesses {
+ if _, ok := finalizedAcctAccesses.StorageWrites[key]; ok {
+ continue
+ }
+ if finalizedAcctAccesses.StorageReads == nil {
+ finalizedAcctAccesses.StorageReads = make(map[common.Hash]struct{})
+ }
+ finalizedAcctAccesses.StorageReads[key] = struct{}{}
+ }
+ }
+ c.lastFinalizedMutations = pendingDiff
+ c.lastFinalizedAccesses = pendingAccesses
+}
+
+func (c *AccessListBuilder) StorageRead(address common.Address, key common.Hash) {
+ c.idxBuilder.storageRead(address, key)
+}
+func (c *AccessListBuilder) AccountRead(address common.Address) {
+ c.idxBuilder.accountRead(address)
+}
+func (c *AccessListBuilder) StorageWrite(address common.Address, key, prevVal, newVal common.Hash) {
+ c.idxBuilder.storageWrite(address, key, prevVal, newVal)
+}
+func (c *AccessListBuilder) BalanceChange(address common.Address, prev, cur *uint256.Int) {
+ c.idxBuilder.balanceChange(address, prev, cur)
+}
+func (c *AccessListBuilder) NonceChange(address common.Address, prev, cur uint64) {
+ c.idxBuilder.nonceChange(address, prev, cur)
+}
+func (c *AccessListBuilder) CodeChange(address common.Address, prev, cur []byte) {
+ c.idxBuilder.codeChange(address, prev, cur)
+}
+func (c *AccessListBuilder) SelfDestruct(address common.Address) {
+ c.idxBuilder.selfDestruct(address)
+}
+
+func (c *AccessListBuilder) EnterScope() {
+ c.idxBuilder.enterScope()
+}
+func (c *AccessListBuilder) ExitScope(executionErr bool) {
+ c.idxBuilder.exitScope(executionErr)
+}
+
+// CodeChange contains the runtime bytecode deployed at an address and the
+// transaction index where the deployment took place.
+type CodeChange struct {
+ TxIdx uint16
+ Code []byte `json:"code,omitempty"`
+}
+
+// ConstructionAccountAccesses contains post-block account state for mutations as well as
+// all storage keys that were read during execution. It is used when building block
+// access list during execution.
+type ConstructionAccountAccesses struct {
+ // StorageWrites is the post-state values of an account's storage slots
+ // that were modified in a block, keyed by the slot key and the tx index
+ // where the modification occurred.
+ StorageWrites map[common.Hash]map[uint16]common.Hash
+
+ // StorageReads is the set of slot keys that were accessed during block
+ // execution.
+ //
+ // Storage slots which are both read and written (with changed values)
+ // appear only in StorageWrites.
+ StorageReads map[common.Hash]struct{}
+
+ // BalanceChanges contains the post-transaction balances of an account,
+ // keyed by transaction indices where it was changed.
+ BalanceChanges map[uint16]*uint256.Int
+
+ // NonceChanges contains the post-state nonce values of an account keyed
+ // by tx index.
+ NonceChanges map[uint16]uint64
+
+ CodeChanges map[uint16]CodeChange
+}
+
+// constructionAccountAccess contains fields for an account which were modified
+// during execution of the current access list index.
+// It also accumulates a set of storage slots which were accessed but not
+// modified.
+type constructionAccountAccess struct {
+ code []byte
+ nonce *uint64
+ balance *uint256.Int
+
+ storageMutations map[common.Hash]common.Hash
+ storageReads map[common.Hash]struct{}
+}
+
+// Merge adds the accesses/mutations from other into the calling instance. If
+func (c *constructionAccountAccess) Merge(other *constructionAccountAccess) {
+ if other.code != nil {
+ c.code = other.code
+ }
+ if other.nonce != nil {
+ c.nonce = other.nonce
+ }
+ if other.balance != nil {
+ c.balance = other.balance
+ }
+ if other.storageMutations != nil {
+ if c.storageMutations == nil {
+ c.storageMutations = make(map[common.Hash]common.Hash)
+ }
+ for key, val := range other.storageMutations {
+ c.storageMutations[key] = val
+ delete(c.storageReads, key)
+ }
+ }
+ if other.storageReads != nil {
+ if c.storageReads == nil {
+ c.storageReads = make(map[common.Hash]struct{})
+ }
+ // TODO: if the state was mutated in the caller, don't add it to the caller's reads.
+ // need to have a test case for this, verify it fails in the current state, and then fix this bug.
+ for key, val := range other.storageReads {
+ c.storageReads[key] = val
+ }
+ }
+}
+
+// MergeReads merges accesses from a reverted execution from:
+// * any reads/writes from the reverted frame which weren't mutated
+// in the current frame, are merged into the current frame as reads.
+func (c *constructionAccountAccess) MergeReads(other *constructionAccountAccess) {
+ if other.storageMutations != nil {
+ if c.storageReads == nil {
+ c.storageReads = make(map[common.Hash]struct{})
+ }
+ for key, _ := range other.storageMutations {
+ if _, ok := c.storageMutations[key]; ok {
+ continue
+ }
+ c.storageReads[key] = struct{}{}
+ }
+ }
+ if other.storageReads != nil {
+ if c.storageReads == nil {
+ c.storageReads = make(map[common.Hash]struct{})
+ }
+ for key := range other.storageReads {
+ if _, ok := c.storageMutations[key]; ok {
+ continue
+ }
+ c.storageReads[key] = struct{}{}
+ }
+ }
+}
+
+func (c *constructionAccountAccess) StorageRead(key common.Hash) {
+ if c.storageReads == nil {
+ c.storageReads = make(map[common.Hash]struct{})
+ }
+ if _, ok := c.storageMutations[key]; !ok {
+ c.storageReads[key] = struct{}{}
+ }
+}
+
+func (c *constructionAccountAccess) StorageWrite(key, prevVal, newVal common.Hash) {
+ if c.storageMutations == nil {
+ c.storageMutations = make(map[common.Hash]common.Hash)
+ }
+ c.storageMutations[key] = newVal
+ // a key can be first read and later written, but it must only show up
+ // in either read or write sets, not both.
+ //
+ // the caller should not
+ // call StorageRead on a slot that was already written
+ delete(c.storageReads, key)
+}
+
+func (c *constructionAccountAccess) BalanceChange(cur *uint256.Int) {
+ c.balance = cur
+}
+
+func (c *constructionAccountAccess) CodeChange(cur []byte) {
+ c.code = cur
+}
+
+func (c *constructionAccountAccess) NonceChange(cur uint64) {
+ c.nonce = &cur
+}
+
+// AccessListBuilder is used to build an EIP-7928 block access list
+type AccessListBuilder struct {
+ FinalizedAccesses map[common.Address]*ConstructionAccountAccesses
+
+ idxBuilder *idxAccessListBuilder
+
+ lastFinalizedMutations *StateDiff
+ lastFinalizedAccesses StateAccesses
+}
+
+// NewAccessListBuilder instantiates an empty access list.
+func NewAccessListBuilder() *AccessListBuilder {
+ return &AccessListBuilder{
+ make(map[common.Address]*ConstructionAccountAccesses),
+ newAccessListBuilder(),
+ nil,
+ nil,
+ }
+}
+
+// Copy returns a deep copy of the access list.
+func (c *AccessListBuilder) Copy() *AccessListBuilder {
+ res := NewAccessListBuilder()
+ for addr, aa := range c.FinalizedAccesses {
+ var aaCopy ConstructionAccountAccesses
+
+ slotWrites := make(map[common.Hash]map[uint16]common.Hash, len(aa.StorageWrites))
+ for key, m := range aa.StorageWrites {
+ slotWrites[key] = maps.Clone(m)
+ }
+ aaCopy.StorageWrites = slotWrites
+ aaCopy.StorageReads = maps.Clone(aa.StorageReads)
+
+ balances := make(map[uint16]*uint256.Int, len(aa.BalanceChanges))
+ for index, balance := range aa.BalanceChanges {
+ balances[index] = balance.Clone()
+ }
+ aaCopy.BalanceChanges = balances
+ aaCopy.NonceChanges = maps.Clone(aa.NonceChanges)
+
+ codeChangesCopy := make(map[uint16]CodeChange)
+ for idx, codeChange := range aa.CodeChanges {
+ codeChangesCopy[idx] = CodeChange{
+ TxIdx: idx,
+ Code: bytes.Clone(codeChange.Code),
+ }
+ }
+ res.FinalizedAccesses[addr] = &aaCopy
+ }
+ return res
+}
+
+// FinalizedIdxChanges returns the state mutations and accesses recorded in the latest
+// access list index that was finalized.
+func (c *AccessListBuilder) FinalizedIdxChanges() (*StateDiff, StateAccesses) {
+ return c.lastFinalizedMutations, c.lastFinalizedAccesses
+}
+
+// StateDiff contains state mutations occurring over one or more access list
+// index.
+type StateDiff struct {
+ Mutations map[common.Address]*AccountMutations `json:"Mutations,omitempty"`
+}
+
+// StateAccesses contains a set of accounts/storage that were accessed during the
+// execution of one or more access list indices.
+type StateAccesses map[common.Address]map[common.Hash]struct{}
+
+// Merge combines adds the accesses from other into s.
+func (s *StateAccesses) Merge(other StateAccesses) {
+ for addr, accesses := range other {
+ if _, ok := (*s)[addr]; !ok {
+ (*s)[addr] = make(map[common.Hash]struct{})
+ }
+ for slot := range accesses {
+ (*s)[addr][slot] = struct{}{}
+ }
+ }
+}
+
+// accountIdxPrestate records the account prestate at a access list index
+// for components which were modified at that index.
+type accountIdxPrestate struct {
+ balance *uint256.Int
+ nonce *uint64
+ code ContractCode
+ storage map[common.Hash]common.Hash
+}
+
+// AccountMutations contains mutations that were made to an account across
+// one or more access list indices.
+type AccountMutations struct {
+ Balance *uint256.Int `json:"Balance,omitempty"`
+ Nonce *uint64 `json:"Nonce,omitempty"`
+ Code ContractCode `json:"Code,omitempty"`
+ StorageWrites map[common.Hash]common.Hash `json:"StorageWrites,omitempty"`
+}
+
+// String returns a human-readable JSON representation of the account mutations.
+func (a *AccountMutations) String() string {
+ var res bytes.Buffer
+ enc := json.NewEncoder(&res)
+ enc.SetIndent("", " ")
+ enc.Encode(a)
+ return res.String()
+}
+
+// Eq returns whether the calling instance is equal to the provided one.
+func (a *AccountMutations) Eq(other *AccountMutations) bool {
+ if a.Balance != nil || other.Balance != nil {
+ if a.Balance == nil || other.Balance == nil {
+ return false
+ }
+
+ if !a.Balance.Eq(other.Balance) {
+ return false
+ }
+ }
+
+ if (len(a.Code) != 0 || len(other.Code) != 0) && !bytes.Equal(a.Code, other.Code) {
+ return false
+ }
+
+ if a.Nonce != nil || other.Nonce != nil {
+ if a.Nonce == nil || other.Nonce == nil {
+ return false
+ }
+
+ if *a.Nonce != *other.Nonce {
+ return false
+ }
+ }
+
+ if a.StorageWrites != nil || other.StorageWrites != nil {
+ if a.StorageWrites == nil || other.StorageWrites == nil {
+ return false
+ }
+
+ if !maps.Equal(a.StorageWrites, other.StorageWrites) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy returns a deep-copy of the instance.
+func (a *AccountMutations) Copy() *AccountMutations {
+ res := &AccountMutations{
+ nil,
+ nil,
+ nil,
+ nil,
+ }
+ if a.Nonce != nil {
+ res.Nonce = new(uint64)
+ *res.Nonce = *a.Nonce
+ }
+ if a.Code != nil {
+ res.Code = bytes.Clone(a.Code)
+ }
+ if a.Balance != nil {
+ res.Balance = new(uint256.Int).Set(a.Balance)
+ }
+ if a.StorageWrites != nil {
+ res.StorageWrites = maps.Clone(a.StorageWrites)
+ }
+ return res
+}
+
+// String returns the state diff as a formatted JSON string.
+func (s *StateDiff) String() string {
+ var res bytes.Buffer
+ enc := json.NewEncoder(&res)
+ enc.SetIndent("", " ")
+ enc.Encode(s)
+ return res.String()
+}
+
+// Merge merges the state changes present in next into the caller. After,
+// the state of the caller is the aggregate diff through next.
+func (s *StateDiff) Merge(next *StateDiff) {
+ for account, diff := range next.Mutations {
+ if mut, ok := s.Mutations[account]; ok {
+ if diff.Balance != nil {
+ mut.Balance = diff.Balance
+ }
+ if diff.Code != nil {
+ mut.Code = diff.Code
+ }
+ if diff.Nonce != nil {
+ mut.Nonce = diff.Nonce
+ }
+ if len(diff.StorageWrites) > 0 {
+ if mut.StorageWrites == nil {
+ mut.StorageWrites = maps.Clone(diff.StorageWrites)
+ } else {
+ for key, val := range diff.StorageWrites {
+ mut.StorageWrites[key] = val
+ }
+ }
+ }
+ } else {
+ s.Mutations[account] = diff.Copy()
+ }
+ }
+}
+
+// Copy returns a deep copy of the StateDiff
+func (s *StateDiff) Copy() *StateDiff {
+ res := &StateDiff{make(map[common.Address]*AccountMutations)}
+ for addr, accountDiff := range s.Mutations {
+ cpy := accountDiff.Copy()
+ res.Mutations[addr] = cpy
+ }
+ return res
+}
+
+// Copy returns a deep copy of the access list
+func (e BlockAccessList) Copy() (res BlockAccessList) {
+ for _, accountAccess := range e {
+ res = append(res, accountAccess.Copy())
+ }
+ return
+}
diff --git a/core/types/bal/bal_encoding.go b/core/types/bal/bal_encoding.go
new file mode 100644
index 0000000000..0f6849dda4
--- /dev/null
+++ b/core/types/bal/bal_encoding.go
@@ -0,0 +1,345 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package bal
+
+import (
+ "bytes"
+ "cmp"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "slices"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/uint256"
+)
+
+//go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -out bal_encoding_rlp_generated.go -type AccountAccess -decoder
+
+// These are objects used as input for the access list encoding. They mirror
+// the spec format.
+
+// BlockAccessList is the encoding format of AccessListBuilder.
+type BlockAccessList []AccountAccess
+
+func (e BlockAccessList) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ l := w.List()
+ for _, access := range e {
+ access.EncodeRLP(w)
+ }
+ w.ListEnd(l)
+ return w.Flush()
+}
+
+func (e *BlockAccessList) DecodeRLP(dec *rlp.Stream) error {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ *e = (*e)[:0]
+ for dec.MoreDataInList() {
+ var access AccountAccess
+ if err := access.DecodeRLP(dec); err != nil {
+ return err
+ }
+ *e = append(*e, access)
+ }
+ dec.ListEnd()
+ return nil
+}
+
+// StringableRepresentation returns an instance of the block access list
+// which can be converted to a human-readable JSON representation.
+func (e *BlockAccessList) StringableRepresentation() interface{} {
+ res := []AccountAccess{}
+ for _, aa := range *e {
+ res = append(res, aa)
+ }
+ return &res
+}
+
+func (e *BlockAccessList) String() string {
+ var res bytes.Buffer
+ enc := json.NewEncoder(&res)
+ enc.SetIndent("", " ")
+ // TODO: check error
+ enc.Encode(e)
+ return res.String()
+}
+
+// Validate returns an error if the contents of the access list are not ordered
+// according to the spec or any code changes are contained which exceed protocol
+// max code size.
+func (e BlockAccessList) Validate() error {
+ if !slices.IsSortedFunc(e, func(a, b AccountAccess) int {
+ return bytes.Compare(a.Address[:], b.Address[:])
+ }) {
+ return errors.New("block access list accounts not in lexicographic order")
+ }
+ for _, entry := range e {
+ if err := entry.validate(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Hash computes the keccak256 hash of the access list
+func (e *BlockAccessList) Hash() common.Hash {
+ var enc bytes.Buffer
+ err := e.EncodeRLP(&enc)
+ if err != nil {
+ // errors here are related to BAL values exceeding maximum size defined
+ // by the spec. Hard-fail because these cases are not expected to be hit
+ // under reasonable conditions.
+ panic(err)
+ }
+ return crypto.Keccak256Hash(enc.Bytes())
+}
+
+// encodingBalanceChange is the encoding format of BalanceChange.
+type encodingBalanceChange struct {
+ TxIdx uint16 `json:"txIndex"`
+ Balance *uint256.Int `json:"balance"`
+}
+
+// encodingAccountNonce is the encoding format of NonceChange.
+type encodingAccountNonce struct {
+ TxIdx uint16 `json:"txIndex"`
+ Nonce uint64 `json:"nonce"`
+}
+
+// encodingStorageWrite is the encoding format of StorageWrites.
+type encodingStorageWrite struct {
+ TxIdx uint16 `json:"txIndex"`
+ ValueAfter common.Hash `json:"valueAfter"`
+}
+
+// encodingStorageWrite is the encoding format of SlotWrites.
+type encodingSlotWrites struct {
+ Slot common.Hash `json:"slot"`
+ Accesses []encodingStorageWrite `json:"accesses"`
+}
+
+// validate returns an instance of the encoding-representation slot writes in
+// working representation.
+func (e *encodingSlotWrites) validate() error {
+ if slices.IsSortedFunc(e.Accesses, func(a, b encodingStorageWrite) int {
+ return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
+ }) {
+ return nil
+ }
+ return errors.New("storage write tx indices not in order")
+}
+
+// AccountAccess is the encoding format of ConstructionAccountAccesses.
+type AccountAccess struct {
+ Address common.Address `json:"address,omitempty"` // 20-byte Ethereum address
+ StorageChanges []encodingSlotWrites `json:"storageChanges,omitempty"` // Storage changes (slot -> [tx_index -> new_value])
+ StorageReads []common.Hash `json:"storageReads,omitempty"` // Read-only storage keys
+ BalanceChanges []encodingBalanceChange `json:"balanceChanges,omitempty"` // Balance changes ([tx_index -> post_balance])
+ NonceChanges []encodingAccountNonce `json:"nonceChanges,omitempty"` // Nonce changes ([tx_index -> new_nonce])
+ CodeChanges []CodeChange `json:"code,omitempty"` // CodeChanges changes ([tx_index -> new_code])
+}
+
+// validate converts the account accesses out of encoding format.
+// If any of the keys in the encoding object are not ordered according to the
+// spec, an error is returned.
+func (e *AccountAccess) validate() error {
+ // Check the storage write slots are sorted in order
+ if !slices.IsSortedFunc(e.StorageChanges, func(a, b encodingSlotWrites) int {
+ return bytes.Compare(a.Slot[:], b.Slot[:])
+ }) {
+ return errors.New("storage writes slots not in lexicographic order")
+ }
+ for _, write := range e.StorageChanges {
+ if err := write.validate(); err != nil {
+ return err
+ }
+ }
+ // test case ideas: keys in both read/writes, duplicate keys in either read/writes
+ // ensure that the read and write key sets are distinct
+ readKeys := make(map[common.Hash]struct{})
+ writeKeys := make(map[common.Hash]struct{})
+ for _, readKey := range e.StorageReads {
+ if _, ok := readKeys[readKey]; ok {
+ return errors.New("duplicate read key")
+ }
+ readKeys[readKey] = struct{}{}
+ }
+ for _, write := range e.StorageChanges {
+ writeKey := write.Slot
+ if _, ok := writeKeys[writeKey]; ok {
+ return errors.New("duplicate write key")
+ }
+ writeKeys[writeKey] = struct{}{}
+ }
+
+ for readKey := range readKeys {
+ if _, ok := writeKeys[readKey]; ok {
+ return errors.New("storage key reported in both read/write sets")
+ }
+ }
+
+ // Check the storage read slots are sorted in order
+ if !slices.IsSortedFunc(e.StorageReads, func(a, b common.Hash) int {
+ return bytes.Compare(a[:], b[:])
+ }) {
+ return errors.New("storage read slots not in lexicographic order")
+ }
+
+ // Check the balance changes are sorted in order
+ if !slices.IsSortedFunc(e.BalanceChanges, func(a, b encodingBalanceChange) int {
+ return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
+ }) {
+ return errors.New("balance changes not in ascending order by tx index")
+ }
+
+ // Check the nonce changes are sorted in order
+ if !slices.IsSortedFunc(e.NonceChanges, func(a, b encodingAccountNonce) int {
+ return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
+ }) {
+ return errors.New("nonce changes not in ascending order by tx index")
+ }
+
+ // Convert code change
+ for _, codeChange := range e.CodeChanges {
+ if len(codeChange.Code) > params.MaxCodeSize {
+ return fmt.Errorf("code change contained oversized code")
+ }
+ }
+ return nil
+}
+
+// Copy returns a deep copy of the account access
+func (e *AccountAccess) Copy() AccountAccess {
+ res := AccountAccess{
+ Address: e.Address,
+ StorageReads: slices.Clone(e.StorageReads),
+ BalanceChanges: slices.Clone(e.BalanceChanges),
+ NonceChanges: slices.Clone(e.NonceChanges),
+ }
+ for _, storageWrite := range e.StorageChanges {
+ res.StorageChanges = append(res.StorageChanges, encodingSlotWrites{
+ Slot: storageWrite.Slot,
+ Accesses: slices.Clone(storageWrite.Accesses),
+ })
+ }
+ for _, codeChange := range e.CodeChanges {
+ res.CodeChanges = append(res.CodeChanges,
+ CodeChange{
+ codeChange.TxIdx,
+ bytes.Clone(codeChange.Code),
+ })
+ }
+ return res
+}
+
+// EncodeRLP returns the RLP-encoded access list
+func (c *AccessListBuilder) EncodeRLP(wr io.Writer) error {
+ return c.ToEncodingObj().EncodeRLP(wr)
+}
+
+var _ rlp.Encoder = &AccessListBuilder{}
+
+// toEncodingObj creates an instance of the ConstructionAccountAccesses of the type that is
+// used as input for the encoding.
+func (a *ConstructionAccountAccesses) toEncodingObj(addr common.Address) AccountAccess {
+ res := AccountAccess{Address: addr}
+
+ // Convert write slots
+ writeSlots := slices.Collect(maps.Keys(a.StorageWrites))
+ slices.SortFunc(writeSlots, common.Hash.Cmp)
+ for _, slot := range writeSlots {
+ var obj encodingSlotWrites
+ obj.Slot = slot
+
+ slotWrites := a.StorageWrites[slot]
+ obj.Accesses = make([]encodingStorageWrite, 0, len(slotWrites))
+
+ indices := slices.Collect(maps.Keys(slotWrites))
+ slices.SortFunc(indices, cmp.Compare[uint16])
+ for _, index := range indices {
+ obj.Accesses = append(obj.Accesses, encodingStorageWrite{
+ TxIdx: index,
+ ValueAfter: slotWrites[index],
+ })
+ }
+ res.StorageChanges = append(res.StorageChanges, obj)
+ }
+
+ // Convert read slots
+ readSlots := slices.Collect(maps.Keys(a.StorageReads))
+ slices.SortFunc(readSlots, common.Hash.Cmp)
+ for _, slot := range readSlots {
+ res.StorageReads = append(res.StorageReads, slot)
+ }
+
+ // Convert balance changes
+ balanceIndices := slices.Collect(maps.Keys(a.BalanceChanges))
+ slices.SortFunc(balanceIndices, cmp.Compare[uint16])
+ for _, idx := range balanceIndices {
+ res.BalanceChanges = append(res.BalanceChanges, encodingBalanceChange{
+ TxIdx: idx,
+ Balance: new(uint256.Int).Set(a.BalanceChanges[idx]),
+ })
+ }
+
+ // Convert nonce changes
+ nonceIndices := slices.Collect(maps.Keys(a.NonceChanges))
+ slices.SortFunc(nonceIndices, cmp.Compare[uint16])
+ for _, idx := range nonceIndices {
+ res.NonceChanges = append(res.NonceChanges, encodingAccountNonce{
+ TxIdx: idx,
+ Nonce: a.NonceChanges[idx],
+ })
+ }
+
+ // Convert code change
+ codeChangeIdxs := slices.Collect(maps.Keys(a.CodeChanges))
+ slices.SortFunc(codeChangeIdxs, cmp.Compare[uint16])
+ for _, idx := range codeChangeIdxs {
+ res.CodeChanges = append(res.CodeChanges, CodeChange{
+ idx,
+ bytes.Clone(a.CodeChanges[idx].Code),
+ })
+ }
+ return res
+}
+
+// ToEncodingObj returns an instance of the access list expressed as the type
+// which is used as input for the encoding/decoding.
+func (c *AccessListBuilder) ToEncodingObj() *BlockAccessList {
+ var addresses []common.Address
+ for addr := range c.FinalizedAccesses {
+ addresses = append(addresses, addr)
+ }
+ slices.SortFunc(addresses, common.Address.Cmp)
+
+ var res BlockAccessList
+ for _, addr := range addresses {
+ res = append(res, c.FinalizedAccesses[addr].toEncodingObj(addr))
+ }
+ return &res
+}
+
+type ContractCode []byte
diff --git a/core/types/bal/bal_encoding_json.go b/core/types/bal/bal_encoding_json.go
new file mode 100644
index 0000000000..8d495848c6
--- /dev/null
+++ b/core/types/bal/bal_encoding_json.go
@@ -0,0 +1,108 @@
+package bal
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+func (c *ContractCode) MarshalJSON() ([]byte, error) {
+ hexStr := fmt.Sprintf("%x", *c)
+ return json.Marshal(hexStr)
+}
+func (e encodingBalanceChange) MarshalJSON() ([]byte, error) {
+ type Alias encodingBalanceChange
+ return json.Marshal(&struct {
+ TxIdx string `json:"txIndex"`
+ *Alias
+ }{
+ TxIdx: fmt.Sprintf("0x%x", e.TxIdx),
+ Alias: (*Alias)(&e),
+ })
+}
+
+func (e *encodingBalanceChange) UnmarshalJSON(data []byte) error {
+ type Alias encodingBalanceChange
+ aux := &struct {
+ TxIdx string `json:"txIndex"`
+ *Alias
+ }{
+ Alias: (*Alias)(e),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ if len(aux.TxIdx) >= 2 && aux.TxIdx[:2] == "0x" {
+ if _, err := fmt.Sscanf(aux.TxIdx, "0x%x", &e.TxIdx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+func (e encodingAccountNonce) MarshalJSON() ([]byte, error) {
+ type Alias encodingAccountNonce
+ return json.Marshal(&struct {
+ TxIdx string `json:"txIndex"`
+ Nonce string `json:"nonce"`
+ *Alias
+ }{
+ TxIdx: fmt.Sprintf("0x%x", e.TxIdx),
+ Nonce: fmt.Sprintf("0x%x", e.Nonce),
+ Alias: (*Alias)(&e),
+ })
+}
+
+func (e *encodingAccountNonce) UnmarshalJSON(data []byte) error {
+ type Alias encodingAccountNonce
+ aux := &struct {
+ TxIdx string `json:"txIndex"`
+ Nonce string `json:"nonce"`
+ *Alias
+ }{
+ Alias: (*Alias)(e),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ if len(aux.TxIdx) >= 2 && aux.TxIdx[:2] == "0x" {
+ if _, err := fmt.Sscanf(aux.TxIdx, "0x%x", &e.TxIdx); err != nil {
+ return err
+ }
+ }
+ if len(aux.Nonce) >= 2 && aux.Nonce[:2] == "0x" {
+ if _, err := fmt.Sscanf(aux.Nonce, "0x%x", &e.Nonce); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler to decode from RLP hex bytes
+func (b *BlockAccessList) UnmarshalJSON(input []byte) error {
+ // Handle both hex string and object formats
+ var hexBytes hexutil.Bytes
+ if err := json.Unmarshal(input, &hexBytes); err == nil {
+ // It's a hex string, decode from RLP
+ return rlp.DecodeBytes(hexBytes, b)
+ }
+
+ // Otherwise try to unmarshal as structured JSON
+ var tmp []AccountAccess
+ if err := json.Unmarshal(input, &tmp); err != nil {
+ return err
+ }
+ *b = BlockAccessList(tmp)
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler to encode as RLP hex bytes
+func (b BlockAccessList) MarshalJSON() ([]byte, error) {
+ // Encode to RLP then to hex
+ rlpBytes, err := rlp.EncodeToBytes(b)
+ if err != nil {
+ return nil, err
+ }
+ return json.Marshal(hexutil.Bytes(rlpBytes))
+}
diff --git a/core/types/bal/bal_encoding_rlp_generated.go b/core/types/bal/bal_encoding_rlp_generated.go
new file mode 100644
index 0000000000..5a99e1b800
--- /dev/null
+++ b/core/types/bal/bal_encoding_rlp_generated.go
@@ -0,0 +1,259 @@
+// Code generated by rlpgen. DO NOT EDIT.
+
+package bal
+
+import "github.com/ethereum/go-ethereum/common"
+import "github.com/ethereum/go-ethereum/rlp"
+import "github.com/holiman/uint256"
+import "io"
+
+func (obj *AccountAccess) EncodeRLP(_w io.Writer) error {
+ w := rlp.NewEncoderBuffer(_w)
+ _tmp0 := w.List()
+ w.WriteBytes(obj.Address[:])
+ _tmp1 := w.List()
+ for _, _tmp2 := range obj.StorageChanges {
+ _tmp3 := w.List()
+ w.WriteBytes(_tmp2.Slot[:])
+ _tmp4 := w.List()
+ for _, _tmp5 := range _tmp2.Accesses {
+ _tmp6 := w.List()
+ w.WriteUint64(uint64(_tmp5.TxIdx))
+ w.WriteBytes(_tmp5.ValueAfter[:])
+ w.ListEnd(_tmp6)
+ }
+ w.ListEnd(_tmp4)
+ w.ListEnd(_tmp3)
+ }
+ w.ListEnd(_tmp1)
+ _tmp7 := w.List()
+ for _, _tmp8 := range obj.StorageReads {
+ w.WriteBytes(_tmp8[:])
+ }
+ w.ListEnd(_tmp7)
+ _tmp9 := w.List()
+ for _, _tmp10 := range obj.BalanceChanges {
+ _tmp11 := w.List()
+ w.WriteUint64(uint64(_tmp10.TxIdx))
+ if _tmp10.Balance == nil {
+ w.Write(rlp.EmptyString)
+ } else {
+ w.WriteUint256(_tmp10.Balance)
+ }
+ w.ListEnd(_tmp11)
+ }
+ w.ListEnd(_tmp9)
+ _tmp12 := w.List()
+ for _, _tmp13 := range obj.NonceChanges {
+ _tmp14 := w.List()
+ w.WriteUint64(uint64(_tmp13.TxIdx))
+ w.WriteUint64(_tmp13.Nonce)
+ w.ListEnd(_tmp14)
+ }
+ w.ListEnd(_tmp12)
+ _tmp15 := w.List()
+ for _, _tmp16 := range obj.CodeChanges {
+ _tmp17 := w.List()
+ w.WriteUint64(uint64(_tmp16.TxIdx))
+ w.WriteBytes(_tmp16.Code)
+ w.ListEnd(_tmp17)
+ }
+ w.ListEnd(_tmp15)
+ w.ListEnd(_tmp0)
+ return w.Flush()
+}
+
+func (obj *AccountAccess) DecodeRLP(dec *rlp.Stream) error {
+ var _tmp0 AccountAccess
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // Address:
+ var _tmp1 common.Address
+ if err := dec.ReadBytes(_tmp1[:]); err != nil {
+ return err
+ }
+ _tmp0.Address = _tmp1
+ // StorageChanges:
+ var _tmp2 []encodingSlotWrites
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ var _tmp3 encodingSlotWrites
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // Slot:
+ var _tmp4 common.Hash
+ if err := dec.ReadBytes(_tmp4[:]); err != nil {
+ return err
+ }
+ _tmp3.Slot = _tmp4
+ // Accesses:
+ var _tmp5 []encodingStorageWrite
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ var _tmp6 encodingStorageWrite
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // TxIdx:
+ _tmp7, err := dec.Uint16()
+ if err != nil {
+ return err
+ }
+ _tmp6.TxIdx = _tmp7
+ // ValueAfter:
+ var _tmp8 common.Hash
+ if err := dec.ReadBytes(_tmp8[:]); err != nil {
+ return err
+ }
+ _tmp6.ValueAfter = _tmp8
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp5 = append(_tmp5, _tmp6)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp3.Accesses = _tmp5
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp2 = append(_tmp2, _tmp3)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp0.StorageChanges = _tmp2
+ // StorageReads:
+ var _tmp9 []common.Hash
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ var _tmp10 common.Hash
+ if err := dec.ReadBytes(_tmp10[:]); err != nil {
+ return err
+ }
+ _tmp9 = append(_tmp9, _tmp10)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp0.StorageReads = _tmp9
+ // BalanceChanges:
+ var _tmp11 []encodingBalanceChange
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ var _tmp12 encodingBalanceChange
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // TxIdx:
+ _tmp13, err := dec.Uint16()
+ if err != nil {
+ return err
+ }
+ _tmp12.TxIdx = _tmp13
+ // Balance:
+ var _tmp14 uint256.Int
+ if err := dec.ReadUint256(&_tmp14); err != nil {
+ return err
+ }
+ _tmp12.Balance = &_tmp14
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp11 = append(_tmp11, _tmp12)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp0.BalanceChanges = _tmp11
+ // NonceChanges:
+ var _tmp15 []encodingAccountNonce
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ var _tmp16 encodingAccountNonce
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // TxIdx:
+ _tmp17, err := dec.Uint16()
+ if err != nil {
+ return err
+ }
+ _tmp16.TxIdx = _tmp17
+ // Nonce:
+ _tmp18, err := dec.Uint64()
+ if err != nil {
+ return err
+ }
+ _tmp16.Nonce = _tmp18
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp15 = append(_tmp15, _tmp16)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp0.NonceChanges = _tmp15
+ // CodeChanges:
+ var _tmp19 []CodeChange
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ for dec.MoreDataInList() {
+ var _tmp20 CodeChange
+ {
+ if _, err := dec.List(); err != nil {
+ return err
+ }
+ // TxIdx:
+ _tmp21, err := dec.Uint16()
+ if err != nil {
+ return err
+ }
+ _tmp20.TxIdx = _tmp21
+ // Code:
+ _tmp22, err := dec.Bytes()
+ if err != nil {
+ return err
+ }
+ _tmp20.Code = _tmp22
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ _tmp19 = append(_tmp19, _tmp20)
+ }
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ _tmp0.CodeChanges = _tmp19
+ if err := dec.ListEnd(); err != nil {
+ return err
+ }
+ }
+ *obj = _tmp0
+ return nil
+}
diff --git a/core/types/bal/bal_test.go b/core/types/bal/bal_test.go
new file mode 100644
index 0000000000..641087b04d
--- /dev/null
+++ b/core/types/bal/bal_test.go
@@ -0,0 +1,255 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package bal
+
+import (
+ "bytes"
+ "cmp"
+ "reflect"
+ "slices"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/internal/testrand"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/holiman/uint256"
+)
+
+func equalBALs(a *BlockAccessList, b *BlockAccessList) bool {
+ if !reflect.DeepEqual(a, b) {
+ return false
+ }
+ return true
+}
+
+func makeTestConstructionBAL() *AccessListBuilder {
+ return &AccessListBuilder{
+ FinalizedAccesses: map[common.Address]*ConstructionAccountAccesses{
+ common.BytesToAddress([]byte{0xff, 0xff}): {
+ StorageWrites: map[common.Hash]map[uint16]common.Hash{
+ common.BytesToHash([]byte{0x01}): {
+ 1: common.BytesToHash([]byte{1, 2, 3, 4}),
+ 2: common.BytesToHash([]byte{1, 2, 3, 4, 5, 6}),
+ },
+ common.BytesToHash([]byte{0x10}): {
+ 20: common.BytesToHash([]byte{1, 2, 3, 4}),
+ },
+ },
+ StorageReads: map[common.Hash]struct{}{
+ common.BytesToHash([]byte{1, 2, 3, 4, 5, 6, 7}): {},
+ },
+ BalanceChanges: map[uint16]*uint256.Int{
+ 1: uint256.NewInt(100),
+ 2: uint256.NewInt(500),
+ },
+ NonceChanges: map[uint16]uint64{
+ 1: 2,
+ 2: 6,
+ },
+ CodeChanges: map[uint16]CodeChange{0: {
+ TxIdx: 0,
+ Code: common.Hex2Bytes("deadbeef"),
+ }},
+ },
+ common.BytesToAddress([]byte{0xff, 0xff, 0xff}): {
+ StorageWrites: map[common.Hash]map[uint16]common.Hash{
+ common.BytesToHash([]byte{0x01}): {
+ 2: common.BytesToHash([]byte{1, 2, 3, 4, 5, 6}),
+ 3: common.BytesToHash([]byte{1, 2, 3, 4, 5, 6, 7, 8}),
+ },
+ common.BytesToHash([]byte{0x10}): {
+ 21: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
+ },
+ },
+ StorageReads: map[common.Hash]struct{}{
+ common.BytesToHash([]byte{1, 2, 3, 4, 5, 6, 7, 8}): {},
+ },
+ BalanceChanges: map[uint16]*uint256.Int{
+ 2: uint256.NewInt(100),
+ 3: uint256.NewInt(500),
+ },
+ NonceChanges: map[uint16]uint64{
+ 1: 2,
+ },
+ },
+ },
+ }
+}
+
+// TestBALEncoding tests that a populated access list can be encoded/decoded correctly.
+func TestBALEncoding(t *testing.T) {
+ var buf bytes.Buffer
+ bal := makeTestConstructionBAL()
+ err := bal.EncodeRLP(&buf)
+ if err != nil {
+ t.Fatalf("encoding failed: %v\n", err)
+ }
+ var dec BlockAccessList
+ if err := dec.DecodeRLP(rlp.NewStream(bytes.NewReader(buf.Bytes()), 10000000)); err != nil {
+ t.Fatalf("decoding failed: %v\n", err)
+ }
+ if dec.Hash() != bal.ToEncodingObj().Hash() {
+ t.Fatalf("encoded block hash doesn't match decoded")
+ }
+ if !equalBALs(bal.ToEncodingObj(), &dec) {
+ t.Fatal("decoded BAL doesn't match")
+ }
+}
+
+func makeTestAccountAccess(sort bool) AccountAccess {
+ var (
+ storageWrites []encodingSlotWrites
+ storageReads []common.Hash
+ balances []encodingBalanceChange
+ nonces []encodingAccountNonce
+ )
+ for i := 0; i < 5; i++ {
+ slot := encodingSlotWrites{
+ Slot: testrand.Hash(),
+ }
+ for j := 0; j < 3; j++ {
+ slot.Accesses = append(slot.Accesses, encodingStorageWrite{
+ TxIdx: uint16(2 * j),
+ ValueAfter: testrand.Hash(),
+ })
+ }
+ if sort {
+ slices.SortFunc(slot.Accesses, func(a, b encodingStorageWrite) int {
+ return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
+ })
+ }
+ storageWrites = append(storageWrites, slot)
+ }
+ if sort {
+ slices.SortFunc(storageWrites, func(a, b encodingSlotWrites) int {
+ return bytes.Compare(a.Slot[:], b.Slot[:])
+ })
+ }
+
+ for i := 0; i < 5; i++ {
+ storageReads = append(storageReads, testrand.Hash())
+ }
+ if sort {
+ slices.SortFunc(storageReads, func(a, b common.Hash) int {
+ return bytes.Compare(a[:], b[:])
+ })
+ }
+
+ for i := 0; i < 5; i++ {
+ balances = append(balances, encodingBalanceChange{
+ TxIdx: uint16(2 * i),
+ Balance: new(uint256.Int).SetBytes(testrand.Bytes(32)),
+ })
+ }
+ if sort {
+ slices.SortFunc(balances, func(a, b encodingBalanceChange) int {
+ return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
+ })
+ }
+
+ for i := 0; i < 5; i++ {
+ nonces = append(nonces, encodingAccountNonce{
+ TxIdx: uint16(2 * i),
+ Nonce: uint64(i + 100),
+ })
+ }
+ if sort {
+ slices.SortFunc(nonces, func(a, b encodingAccountNonce) int {
+ return cmp.Compare[uint16](a.TxIdx, b.TxIdx)
+ })
+ }
+
+ return AccountAccess{
+ Address: [20]byte(testrand.Bytes(20)),
+ StorageChanges: storageWrites,
+ StorageReads: storageReads,
+ BalanceChanges: balances,
+ NonceChanges: nonces,
+ CodeChanges: []CodeChange{
+ {
+ TxIdx: 100,
+ Code: testrand.Bytes(256),
+ },
+ },
+ }
+}
+
+func makeTestBAL(sort bool) BlockAccessList {
+ list := BlockAccessList{}
+ for i := 0; i < 5; i++ {
+ list = append(list, makeTestAccountAccess(sort))
+ }
+ if sort {
+ slices.SortFunc(list, func(a, b AccountAccess) int {
+ return bytes.Compare(a.Address[:], b.Address[:])
+ })
+ }
+ return list
+}
+
+func TestBlockAccessListCopy(t *testing.T) {
+ list := makeTestBAL(true)
+ cpy := list.Copy()
+ cpyCpy := cpy.Copy()
+
+ if !reflect.DeepEqual(list, cpy) {
+ t.Fatal("block access mismatch")
+ }
+ if !reflect.DeepEqual(cpy, cpyCpy) {
+ t.Fatal("block access mismatch")
+ }
+
+ // Make sure the mutations on copy won't affect the origin
+ for _, aa := range cpyCpy {
+ for i := 0; i < len(aa.StorageReads); i++ {
+ aa.StorageReads[i] = [32]byte(testrand.Bytes(32))
+ }
+ }
+ if !reflect.DeepEqual(list, cpy) {
+ t.Fatal("block access mismatch")
+ }
+}
+
+func TestBlockAccessListValidation(t *testing.T) {
+ // Validate the block access list after RLP decoding
+ enc := makeTestBAL(true)
+ if err := enc.Validate(); err != nil {
+ t.Fatalf("Unexpected validation error: %v", err)
+ }
+ var buf bytes.Buffer
+ if err := enc.EncodeRLP(&buf); err != nil {
+ t.Fatalf("Unexpected encoding error: %v", err)
+ }
+
+ var dec BlockAccessList
+ if err := dec.DecodeRLP(rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)); err != nil {
+ t.Fatalf("Unexpected RLP-decode error: %v", err)
+ }
+ if err := dec.Validate(); err != nil {
+ t.Fatalf("Unexpected validation error: %v", err)
+ }
+
+ // Validate the derived block access list
+ cBAL := makeTestConstructionBAL()
+ listB := cBAL.ToEncodingObj()
+ if err := listB.Validate(); err != nil {
+ t.Fatalf("Unexpected validation error: %v", err)
+ }
+}
+
+// BALReader test ideas
+// * BAL which doesn't have any pre-tx system contracts should return an empty state diff at idx 0
diff --git a/core/types/block.go b/core/types/block.go
index 4f4e47d9e1..63e32236ce 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types/bal"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-verkle"
)
@@ -235,123 +236,6 @@ type Body struct {
Withdrawals []*Withdrawal `rlp:"optional"`
}
-// StorageAccessItem is a single storage key that is accessed in a block.
-type StorageAccessItem struct {
- TxIndex uint32 // index of the first transaction in the block that accessed the storage
- Dirty bool // true if the storage was modified in the block, false if it was read only
- Key common.Hash
-}
-
-// AccountAccessListEncode & BlockAccessListEncode are for BAL serialization.
-type AccountAccessListEncode struct {
- TxIndex uint32 // index of the first transaction in the block that accessed the account
- Address common.Address
- StorageItems []StorageAccessItem
-}
-
-type BlockAccessListEncode struct {
- Version uint32 // Version of the access list format
- Number uint64 // number of the block that the BAL is for
- Hash common.Hash // hash of the block that the BAL is for
- SignData []byte // sign data for BAL
- Accounts []AccountAccessListEncode
-}
-
-// TxAccessListPrefetch & BlockAccessListPrefetch are for BAL prefetch
-type StorageAccessItemPrefetch struct {
- Dirty bool
- Key common.Hash
-}
-
-type TxAccessListPrefetch struct {
- Accounts map[common.Address][]StorageAccessItemPrefetch
-}
-
-type BlockAccessListPrefetch struct {
- AccessListItems map[uint32]TxAccessListPrefetch
-}
-
-func (b *BlockAccessListPrefetch) Update(aclEncode *AccountAccessListEncode) {
- if aclEncode == nil {
- return
- }
- accAddr := aclEncode.Address
- b.PrepareTxAccount(aclEncode.TxIndex, accAddr)
- for _, storageItem := range aclEncode.StorageItems {
- b.PrepareTxStorage(accAddr, storageItem)
- }
-}
-
-func (b *BlockAccessListPrefetch) PrepareTxStorage(accAddr common.Address, storageItem StorageAccessItem) {
- b.PrepareTxAccount(storageItem.TxIndex, accAddr)
- txAccessList := b.AccessListItems[storageItem.TxIndex]
- txAccessList.Accounts[accAddr] = append(txAccessList.Accounts[accAddr], StorageAccessItemPrefetch{
- Dirty: storageItem.Dirty,
- Key: storageItem.Key,
- })
-}
-func (b *BlockAccessListPrefetch) PrepareTxAccount(txIndex uint32, addr common.Address) {
- // create the tx access list if not exists
- if _, ok := b.AccessListItems[txIndex]; !ok {
- b.AccessListItems[txIndex] = TxAccessListPrefetch{
- Accounts: make(map[common.Address][]StorageAccessItemPrefetch),
- }
- }
- // create the account access list if not exists
- if _, ok := b.AccessListItems[txIndex].Accounts[addr]; !ok {
- b.AccessListItems[txIndex].Accounts[addr] = make([]StorageAccessItemPrefetch, 0)
- }
-}
-
-// BlockAccessListRecord & BlockAccessListRecord are used to record access list during tx execution.
-type AccountAccessListRecord struct {
- TxIndex uint32 // index of the first transaction in the block that accessed the account
- StorageItems map[common.Hash]StorageAccessItem
-}
-
-type BlockAccessListRecord struct {
- Version uint32 // Version of the access list format
- Accounts map[common.Address]AccountAccessListRecord
-}
-
-func (b *BlockAccessListRecord) AddAccount(addr common.Address, txIndex uint32) {
- if b == nil {
- return
- }
-
- if _, ok := b.Accounts[addr]; !ok {
- b.Accounts[addr] = AccountAccessListRecord{
- TxIndex: txIndex,
- StorageItems: make(map[common.Hash]StorageAccessItem),
- }
- }
-}
-
-func (b *BlockAccessListRecord) AddStorage(addr common.Address, key common.Hash, txIndex uint32, dirty bool) {
- if b == nil {
- return
- }
-
- if _, ok := b.Accounts[addr]; !ok {
- b.Accounts[addr] = AccountAccessListRecord{
- TxIndex: txIndex,
- StorageItems: make(map[common.Hash]StorageAccessItem),
- }
- }
-
- if _, ok := b.Accounts[addr].StorageItems[key]; !ok {
- b.Accounts[addr].StorageItems[key] = StorageAccessItem{
- TxIndex: txIndex,
- Dirty: dirty,
- Key: key,
- }
- } else {
- storageItem := b.Accounts[addr].StorageItems[key]
- storageItem.Dirty = dirty
- b.Accounts[addr].StorageItems[key] = storageItem
- }
-}
-
// Block represents an Ethereum block.
//
// Note the Block type tries to be 'immutable', and contains certain caches that rely
@@ -393,8 +277,8 @@ type Block struct {
sidecars BlobSidecars
// bal provides block access list
- bal *BlockAccessListEncode
- balSize atomic.Uint64
+ accessList *BlockAccessListEncode
+ accessListSize atomic.Uint64
}
// "external" block encoding. used for eth protocol, etc.
@@ -507,12 +391,16 @@ func CopyHeader(h *Header) *Header {
// DecodeRLP decodes a block from RLP.
func (b *Block) DecodeRLP(s *rlp.Stream) error {
- var eb extblock
+ var (
+ eb extblock
+ )
_, size, _ := s.Kind()
if err := s.Decode(&eb); err != nil {
return err
}
b.header, b.uncles, b.transactions, b.withdrawals = eb.Header, eb.Uncles, eb.Txs, eb.Withdrawals
+
+ // TODO: ensure that BAL is accounted for in size
b.size.Store(rlp.ListSize(size))
return nil
}
@@ -536,9 +424,10 @@ func (b *Block) Body() *Body {
// Accessors for body data. These do not return a copy because the content
// of the body slices does not affect the cached hash/size in block.
-func (b *Block) Uncles() []*Header { return b.uncles }
-func (b *Block) Transactions() Transactions { return b.transactions }
-func (b *Block) Withdrawals() Withdrawals { return b.withdrawals }
+func (b *Block) Uncles() []*Header { return b.uncles }
+func (b *Block) Transactions() Transactions { return b.transactions }
+func (b *Block) Withdrawals() Withdrawals { return b.withdrawals }
+func (b *Block) AccessList() *BlockAccessListEncode { return b.accessList }
func (b *Block) Transaction(hash common.Hash) *Transaction {
for _, transaction := range b.transactions {
@@ -617,16 +506,16 @@ func (b *Block) Size() uint64 {
return uint64(c)
}
-func (b *Block) BALSize() uint64 {
- if b.bal == nil {
+func (b *Block) AccessListSize() uint64 {
+ if b.accessList == nil {
return 0
}
- if size := b.balSize.Load(); size > 0 {
+ if size := b.accessListSize.Load(); size > 0 {
return size
}
c := writeCounter(0)
- rlp.Encode(&c, b.bal)
- b.balSize.Store(uint64(c))
+ rlp.Encode(&c, b.accessList)
+ b.accessListSize.Store(uint64(c))
return uint64(c)
}
@@ -642,10 +531,6 @@ func (b *Block) Sidecars() BlobSidecars {
return b.sidecars
}
-func (b *Block) BAL() *BlockAccessListEncode {
- return b.bal
-}
-
func (b *Block) CleanSidecars() {
b.sidecars = make(BlobSidecars, 0)
}
@@ -701,7 +586,7 @@ func (b *Block) WithSeal(header *Header) *Block {
withdrawals: b.withdrawals,
witness: b.witness,
sidecars: b.sidecars,
- bal: b.bal,
+ accessList: b.accessList,
}
}
@@ -715,7 +600,7 @@ func (b *Block) WithBody(body Body) *Block {
withdrawals: slices.Clone(body.Withdrawals),
witness: b.witness,
sidecars: b.sidecars,
- bal: b.bal,
+ accessList: b.accessList,
}
for i := range body.Uncles {
block.uncles[i] = CopyHeader(body.Uncles[i])
@@ -731,7 +616,7 @@ func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block {
uncles: b.uncles,
witness: b.witness,
sidecars: b.sidecars,
- bal: b.bal,
+ accessList: b.accessList,
}
if withdrawals != nil {
block.withdrawals = make([]*Withdrawal, len(withdrawals))
@@ -740,46 +625,44 @@ func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block {
return block
}
-// WithSidecars returns a block containing the given blobs.
-func (b *Block) WithSidecars(sidecars BlobSidecars) *Block {
+// WithAccessList returns a block containing the given access list.
+func (b *Block) WithAccessList(accessList *BlockAccessListEncode) *Block {
block := &Block{
header: b.header,
transactions: b.transactions,
uncles: b.uncles,
withdrawals: b.withdrawals,
witness: b.witness,
- bal: b.bal,
- }
- if sidecars != nil {
- block.sidecars = make(BlobSidecars, len(sidecars))
- copy(block.sidecars, sidecars)
+ sidecars: b.sidecars,
+ accessList: accessList,
}
return block
}
-func (b *Block) WithBAL(bal *BlockAccessListEncode) *Block {
+// WithSidecars returns a block containing the given blobs.
+func (b *Block) WithSidecars(sidecars BlobSidecars) *Block {
block := &Block{
header: b.header,
transactions: b.transactions,
uncles: b.uncles,
withdrawals: b.withdrawals,
witness: b.witness,
- sidecars: b.sidecars,
+ accessList: b.accessList,
+ }
+ if sidecars != nil {
+ block.sidecars = make(BlobSidecars, len(sidecars))
+ copy(block.sidecars, sidecars)
}
- block.bal = bal
return block
}
-func (b *Block) UpdateBAL(bal *BlockAccessListEncode) {
- b.bal = bal
-}
-
func (b *Block) WithWitness(witness *ExecutionWitness) *Block {
return &Block{
header: b.header,
transactions: b.transactions,
uncles: b.uncles,
withdrawals: b.withdrawals,
+ accessList: b.accessList,
witness: witness,
sidecars: b.sidecars,
}
@@ -861,3 +744,15 @@ func EncodeSigHeader(w io.Writer, header *Header, chainId *big.Int) {
panic("can't encode: " + err.Error())
}
}
+
+type BlockAccessListEncode struct {
+ Version uint32 // Version of the access list format
+ Number uint64 // number of the block that the BAL is for
+ Hash common.Hash // hash of the block that the BAL is for
+ SignData []byte // sign data for BAL
+ AccessList *bal.BlockAccessList // encoded access list
+}
+
+func (b *BlockAccessListEncode) String() string {
+ return fmt.Sprintf("Version: %d, Number: %d, Hash: %s, SignData: %s, AccessList: %s", b.Version, b.Number, b.Hash.Hex(), common.Bytes2Hex(b.SignData), b.AccessList.String())
+}
diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go
index 629be38e7d..864176ef9c 100644
--- a/core/verkle_witness_test.go
+++ b/core/verkle_witness_test.go
@@ -232,7 +232,7 @@ func TestProcessParentBlockHash(t *testing.T) {
// etc
checkBlockHashes := func(statedb *state.StateDB, isVerkle bool) {
statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeUnspecified)
- statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode)
+ statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode, tracing.CodeChangeUnspecified)
// Process n blocks, from 1 .. num
var num = 2
for i := 1; i <= num; i++ {
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 3d95861d6a..f1192ef087 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -78,6 +78,7 @@ type TxContext struct {
BlobHashes []common.Hash // Provides information for BLOBHASH
BlobFeeCap *big.Int // Is used to zero the blobbasefee if NoBaseFee is set
AccessEvents *state.AccessEvents // Capture all state accesses for this tx
+ Index uint64 // the index of the transaction within the block being executed (0 if executing a standalone call)
}
// EVM is the Ethereum Virtual Machine base object and provides
@@ -591,6 +592,7 @@ func (evm *EVM) create(caller common.Address, code []byte, gas uint64, value *ui
// - the storage is non-empty
contractHash := evm.StateDB.GetCodeHash(address)
storageRoot := evm.StateDB.GetStorageRoot(address)
+
if evm.StateDB.GetNonce(address) != 0 ||
(contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) || // non-empty code
(storageRoot != (common.Hash{}) && storageRoot != types.EmptyRootHash) { // non-empty storage
@@ -694,7 +696,7 @@ func (evm *EVM) initNewContract(contract *Contract, address common.Address) ([]b
}
}
- evm.StateDB.SetCode(address, ret)
+ evm.StateDB.SetCode(address, ret, tracing.CodeChangeContractCreation)
return ret, nil
}
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go
index 58f039df9f..c7c1274bf2 100644
--- a/core/vm/gas_table.go
+++ b/core/vm/gas_table.go
@@ -98,8 +98,8 @@ var (
func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var (
- y, x = stack.Back(1), stack.Back(0)
- current = evm.StateDB.GetState(contract.Address(), x.Bytes32())
+ y, x = stack.Back(1), stack.Back(0)
+ current, original = evm.StateDB.GetStateAndCommittedState(contract.Address(), x.Bytes32())
)
// The legacy gas metering only takes into consideration the current state
// Legacy rules should be applied if we are in Petersburg (removal of EIP-1283)
@@ -139,7 +139,6 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi
if current == value { // noop (1)
return params.NetSstoreNoopGas, nil
}
- original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32())
if original == current {
if original == (common.Hash{}) { // create slot (2.1.1)
return params.NetSstoreInitGas, nil
@@ -188,15 +187,14 @@ func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, m
}
// Gas sentry honoured, do the actual gas calculation based on the stored value
var (
- y, x = stack.Back(1), stack.Back(0)
- current = evm.StateDB.GetState(contract.Address(), x.Bytes32())
+ y, x = stack.Back(1), stack.Back(0)
+ current, original = evm.StateDB.GetStateAndCommittedState(contract.Address(), x.Bytes32())
)
value := common.Hash(y.Bytes32())
if current == value { // noop (1)
return params.SloadGasEIP2200, nil
}
- original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32())
if original == current {
if original == (common.Hash{}) { // create slot (2.1.1)
return params.SstoreSetGasEIP2200, nil
diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go
index cb6143c0b5..7fe76b0a63 100644
--- a/core/vm/gas_table_test.go
+++ b/core/vm/gas_table_test.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
@@ -87,7 +88,7 @@ func TestEIP2200(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.CreateAccount(address)
- statedb.SetCode(address, hexutil.MustDecode(tt.input))
+ statedb.SetCode(address, hexutil.MustDecode(tt.input), tracing.CodeChangeUnspecified)
statedb.SetState(address, common.Hash{}, common.BytesToHash([]byte{tt.original}))
statedb.Finalise(true) // Push the state into the "original" slot
@@ -139,7 +140,7 @@ func TestCreateGas(t *testing.T) {
address := common.BytesToAddress([]byte("contract"))
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.CreateAccount(address)
- statedb.SetCode(address, hexutil.MustDecode(tt.code))
+ statedb.SetCode(address, hexutil.MustDecode(tt.code), tracing.CodeChangeUnspecified)
statedb.Finalise(true)
vmctx := BlockContext{
CanTransfer: func(StateDB, common.Address, *uint256.Int) bool { return true },
diff --git a/core/vm/interface.go b/core/vm/interface.go
index d9bafdad2a..b3adc2626f 100644
--- a/core/vm/interface.go
+++ b/core/vm/interface.go
@@ -35,7 +35,6 @@ type StateDB interface {
SubBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) uint256.Int
AddBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) uint256.Int
GetBalance(common.Address) *uint256.Int
- SetBalance(addr common.Address, amount *uint256.Int, reason tracing.BalanceChangeReason)
GetNonce(common.Address) uint64
SetNonce(common.Address, uint64, tracing.NonceChangeReason)
@@ -44,14 +43,14 @@ type StateDB interface {
GetCode(common.Address) []byte
// SetCode sets the new code for the address, and returns the previous code, if any.
- SetCode(common.Address, []byte) []byte
+ SetCode(common.Address, []byte, tracing.CodeChangeReason) []byte
GetCodeSize(common.Address) int
AddRefund(uint64)
SubRefund(uint64)
GetRefund() uint64
- GetCommittedState(common.Address, common.Hash) common.Hash
+ GetStateAndCommittedState(common.Address, common.Hash) (common.Hash, common.Hash)
GetState(common.Address, common.Hash) common.Hash
SetState(common.Address, common.Hash, common.Hash) common.Hash
GetStorageRoot(addr common.Address) common.Hash
@@ -84,7 +83,6 @@ type StateDB interface {
// AddSlotToAccessList adds the given (address,slot) to the access list. This operation is safe to perform
// even if the feature/fork is not active yet
AddSlotToAccessList(addr common.Address, slot common.Hash)
- ClearAccessList()
// PointCache returns the point cache used in computations
PointCache() *utils.PointCache
@@ -96,8 +94,6 @@ type StateDB interface {
RevertToSnapshot(int)
Snapshot() int
- NoTries() bool
-
AddLog(*types.Log)
GetLogs(hash common.Hash, blockNumber uint64, blockHash common.Hash, blockTime uint64) []*types.Log
AddPreimage(common.Hash, []byte)
diff --git a/core/vm/interpreter_test.go b/core/vm/interpreter_test.go
index 0b93dd59e7..249ec21021 100644
--- a/core/vm/interpreter_test.go
+++ b/core/vm/interpreter_test.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
@@ -44,7 +45,7 @@ func TestLoopInterrupt(t *testing.T) {
for i, tt := range loopInterruptTests {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.CreateAccount(address)
- statedb.SetCode(address, common.Hex2Bytes(tt))
+ statedb.SetCode(address, common.Hex2Bytes(tt), tracing.CodeChangeUnspecified)
statedb.Finalise(true)
evm := NewEVM(vmctx, statedb, params.AllEthashProtocolChanges, Config{})
diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go
index ff3875868f..c2987f0e49 100644
--- a/core/vm/operations_acl.go
+++ b/core/vm/operations_acl.go
@@ -34,10 +34,10 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
}
// Gas sentry honoured, do the actual gas calculation based on the stored value
var (
- y, x = stack.Back(1), stack.peek()
- slot = common.Hash(x.Bytes32())
- current = evm.StateDB.GetState(contract.Address(), slot)
- cost = uint64(0)
+ y, x = stack.Back(1), stack.peek()
+ slot = common.Hash(x.Bytes32())
+ current, original = evm.StateDB.GetStateAndCommittedState(contract.Address(), x.Bytes32())
+ cost = uint64(0)
)
// Check slot presence in the access list
if _, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent {
@@ -52,7 +52,7 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
// return params.SloadGasEIP2200, nil
return cost + params.WarmStorageReadCostEIP2929, nil // SLOAD_GAS
}
- original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32())
+
if original == current {
if original == (common.Hash{}) { // create slot (2.1.1)
return cost + params.SstoreSetGasEIP2200, nil
@@ -74,7 +74,7 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
if original == value {
if original == (common.Hash{}) { // reset to original inexistent slot (2.2.2.1)
// EIP 2200 Original clause:
- //evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - params.SloadGasEIP2200)
+ // evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - params.SloadGasEIP2200)
evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - params.WarmStorageReadCostEIP2929)
} else { // reset to original existing slot (2.2.2.2)
// EIP 2200 Original clause:
@@ -86,7 +86,7 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
}
}
// EIP-2200 original clause:
- //return params.SloadGasEIP2200, nil // dirty update (2.2)
+ // return params.SloadGasEIP2200, nil // dirty update (2.2)
return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2)
}
}
@@ -211,7 +211,7 @@ var (
// SLOAD_GAS 800 = WARM_STORAGE_READ_COST
// SSTORE_RESET_GAS 5000 5000 - COLD_SLOAD_COST
//
- //The other parameters defined in EIP 2200 are unchanged.
+ // The other parameters defined in EIP 2200 are unchanged.
// see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified
gasSStoreEIP2929 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP2200)
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index 9d984291f2..b40e99d047 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
@@ -139,7 +140,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, &address, vm.ActivePrecompiles(rules), nil)
cfg.State.CreateAccount(address)
// set the receiver's (the executing contract) code for execution.
- cfg.State.SetCode(address, code)
+ cfg.State.SetCode(address, code, tracing.CodeChangeUnspecified)
// Call the code with the given configuration.
ret, leftOverGas, err := vmenv.Call(
cfg.Origin,
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index f2c80a8f1d..d2eac32696 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -116,7 +116,7 @@ func TestCall(t *testing.T) {
byte(vm.PUSH1), 32,
byte(vm.PUSH1), 0,
byte(vm.RETURN),
- })
+ }, tracing.CodeChangeUnspecified)
ret, _, err := Call(address, nil, &Config{State: state})
if err != nil {
@@ -169,7 +169,7 @@ func benchmarkEVM_Create(bench *testing.B, code string) {
)
statedb.CreateAccount(sender)
- statedb.SetCode(receiver, common.FromHex(code))
+ statedb.SetCode(receiver, common.FromHex(code), tracing.CodeChangeUnspecified)
runtimeConfig := Config{
Origin: sender,
State: statedb,
@@ -234,7 +234,7 @@ func BenchmarkEVM_SWAP1(b *testing.B) {
b.Run("10k", func(b *testing.B) {
contractCode := swapContract(10_000)
- state.SetCode(contractAddr, contractCode)
+ state.SetCode(contractAddr, contractCode, tracing.CodeChangeUnspecified)
for i := 0; i < b.N; i++ {
_, _, err := Call(contractAddr, []byte{}, &Config{State: state})
@@ -265,7 +265,7 @@ func BenchmarkEVM_RETURN(b *testing.B) {
b.ReportAllocs()
contractCode := returnContract(n)
- state.SetCode(contractAddr, contractCode)
+ state.SetCode(contractAddr, contractCode, tracing.CodeChangeUnspecified)
for i := 0; i < b.N; i++ {
ret, _, err := Call(contractAddr, []byte{}, &Config{State: state})
@@ -424,12 +424,12 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
byte(vm.PUSH1), 0x00,
byte(vm.PUSH1), 0x00,
byte(vm.REVERT),
- })
+ }, tracing.CodeChangeUnspecified)
}
//cfg.State.CreateAccount(cfg.Origin)
// set the receiver's (the executing contract) code for execution.
- cfg.State.SetCode(destination, code)
+ cfg.State.SetCode(destination, code, tracing.CodeChangeUnspecified)
Call(destination, nil, cfg)
b.Run(name, func(b *testing.B) {
@@ -671,7 +671,7 @@ func TestColdAccountAccessCost(t *testing.T) {
Tracer: &tracing.Hooks{
OnOpcode: func(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
// Uncomment to investigate failures:
- //t.Logf("%d: %v %d", step, vm.OpCode(op).String(), cost)
+ //t.Logf("%d: %v %d", step, vm.OpCode(op).PrettyPrint(), cost)
if step == tc.step {
have = cost
}
@@ -775,12 +775,12 @@ func TestRuntimeJSTracer(t *testing.T) {
for i, jsTracer := range jsTracers {
for j, tc := range tests {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
- statedb.SetCode(main, tc.code)
- statedb.SetCode(common.HexToAddress("0xbb"), calleeCode)
- statedb.SetCode(common.HexToAddress("0xcc"), calleeCode)
- statedb.SetCode(common.HexToAddress("0xdd"), calleeCode)
- statedb.SetCode(common.HexToAddress("0xee"), calleeCode)
- statedb.SetCode(common.HexToAddress("0xff"), suicideCode)
+ statedb.SetCode(main, tc.code, tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xbb"), calleeCode, tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xcc"), calleeCode, tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xdd"), calleeCode, tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xee"), calleeCode, tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xff"), suicideCode, tracing.CodeChangeUnspecified)
tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil, params.MergedTestChainConfig)
if err != nil {
@@ -951,12 +951,12 @@ func TestRuntimeJSTracerWithOpcodeOptimizer(t *testing.T) {
for i, jsTracer := range jsTracers {
for j, tc := range tests {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
- statedb.SetCode(main, tc.code)
- statedb.SetCode(common.HexToAddress("0xbb"), calleeCode)
- statedb.SetCode(common.HexToAddress("0xcc"), calleeCode)
- statedb.SetCode(common.HexToAddress("0xdd"), calleeCode)
- statedb.SetCode(common.HexToAddress("0xee"), calleeCode)
- statedb.SetCode(common.HexToAddress("0xff"), depressedCode)
+ statedb.SetCode(main, tc.code, tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xbb"), calleeCode, tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xcc"), calleeCode, tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xdd"), calleeCode, tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xee"), calleeCode, tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xff"), depressedCode, tracing.CodeChangeUnspecified)
/* wait for optimized code to be generated */
time.Sleep(time.Second)
tracer, err := tracers.DefaultDirectory.New(jsTracer, new(tracers.Context), nil, params.MergedTestChainConfig)
@@ -1043,8 +1043,8 @@ func BenchmarkTracerStepVsCallFrame(b *testing.B) {
// delegation designator incurs the correct amount of gas based on the tracer.
func TestDelegatedAccountAccessCost(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
- statedb.SetCode(common.HexToAddress("0xff"), types.AddressToDelegation(common.HexToAddress("0xaa")))
- statedb.SetCode(common.HexToAddress("0xaa"), program.New().Return(0, 0).Bytes())
+ statedb.SetCode(common.HexToAddress("0xff"), types.AddressToDelegation(common.HexToAddress("0xaa")), tracing.CodeChangeUnspecified)
+ statedb.SetCode(common.HexToAddress("0xaa"), program.New().Return(0, 0).Bytes(), tracing.CodeChangeUnspecified)
for i, tc := range []struct {
code []byte
diff --git a/eth/api_debug.go b/eth/api_debug.go
index 5ef42d3c66..8e8cc5749b 100644
--- a/eth/api_debug.go
+++ b/eth/api_debug.go
@@ -17,11 +17,14 @@
package eth
import (
+ "bytes"
"context"
"errors"
"fmt"
"time"
+ "github.com/ethereum/go-ethereum/core/types/bal"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/rawdb"
@@ -191,7 +194,6 @@ func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hex
OnlyWithAddresses: !incompletes,
Start: start,
Max: uint64(maxResults),
- StateScheme: stateDb.Database().TrieDB().Scheme(),
}
if maxResults > AccountRangeMaxResults || maxResults <= 0 {
opts.Max = AccountRangeMaxResults
@@ -446,3 +448,46 @@ func (api *DebugAPI) GetTrieFlushInterval() (string, error) {
}
return api.eth.blockchain.GetTrieFlushInterval().String(), nil
}
+
+// StateSize returns the current state size statistics from the state size tracker.
+// Returns an error if the state size tracker is not initialized or if stats are not ready.
+func (api *DebugAPI) StateSize(blockHashOrNumber *rpc.BlockNumberOrHash) (interface{}, error) {
+ // StateSizer functionality is not available in this version
+ return nil, errors.New("state size tracker is not available in this version")
+}
+
+func (api *DebugAPI) ExecutionWitness(bn rpc.BlockNumber) (interface{}, error) {
+ // ExecutionWitness functionality is not available in this version
+ return nil, errors.New("execution witness is not available in this version")
+}
+
+func (api *DebugAPI) ExecutionWitnessByHash(hash common.Hash) (interface{}, error) {
+ // ExecutionWitness functionality is not available in this version
+ return nil, errors.New("execution witness is not available in this version")
+}
+
+func (api *DebugAPI) GetBlockAccessList(number rpc.BlockNumberOrHash) (*bal.BlockAccessList, error) {
+ var block *types.Block
+ if num := number.BlockNumber; num != nil {
+ block = api.eth.blockchain.GetBlockByNumber(uint64(num.Int64()))
+ } else if hash := number.BlockHash; hash != nil {
+ block = api.eth.blockchain.GetBlockByHash(*hash)
+ }
+
+ if block == nil {
+ return nil, fmt.Errorf("block not found")
+ }
+ return block.AccessList().AccessList, nil
+}
+
+func (api *DebugAPI) GetEncodedBlockAccessList(number rpc.BlockNumberOrHash) ([]byte, error) {
+ bal, err := api.GetBlockAccessList(number)
+ if err != nil {
+ return nil, err
+ }
+ var enc bytes.Buffer
+ if err = bal.EncodeRLP(&enc); err != nil {
+ return nil, err
+ }
+ return enc.Bytes(), nil
+}
diff --git a/eth/backend.go b/eth/backend.go
index 6b83607f81..4af8eeb227 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -278,6 +278,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if networkID == 0 {
networkID = chainConfig.ChainID.Uint64()
}
+ if config.ExperimentalBAL {
+ chainConfig.EnableBAL = true
+ }
// Assemble the Ethereum object.
eth := &Ethereum{
@@ -332,7 +335,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
options = &core.BlockChainConfig{
TrieCleanLimit: config.TrieCleanCache,
NoPrefetch: config.NoPrefetch,
- EnableBAL: config.EnableBAL,
TrieDirtyLimit: config.TrieDirtyCache,
ArchiveMode: config.NoPruning,
TrieTimeLimit: config.TrieTimeout,
@@ -358,6 +360,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
EnablePreimageRecording: config.EnablePreimageRecording,
EnableOpcodeOptimizations: config.EnableOpcodeOptimizing,
},
+ EnableBAL: config.ExperimentalBAL,
}
)
if config.DisableTxIndexer {
@@ -447,13 +450,13 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
RequiredBlocks: config.RequiredBlocks,
DirectBroadcast: config.DirectBroadcast,
EnableEVNFeatures: stack.Config().EnableEVNFeatures,
- EnableBAL: config.EnableBAL,
EVNNodeIdsWhitelist: stack.Config().P2P.EVNNodeIdsWhitelist,
ProxyedValidatorAddresses: stack.Config().P2P.ProxyedValidatorAddresses,
ProxyedNodeIds: stack.Config().P2P.ProxyedNodeIds,
DisablePeerTxBroadcast: config.DisablePeerTxBroadcast,
PeerSet: newPeerSet(),
EnableQuickBlockFetching: stack.Config().EnableQuickBlockFetching,
+ EnableBAL: config.ExperimentalBAL,
}); err != nil {
return nil, err
}
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index a95f0dfd14..f40914ff80 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -112,7 +112,6 @@ type Config struct {
NoPruning bool // Whether to disable pruning and flush everything to disk
NoPrefetch bool // Whether to disable prefetching and only load state on demand
- EnableBAL bool
DirectBroadcast bool
DisableSnapProtocol bool // Whether disable snap protocol
RangeLimit bool
@@ -216,6 +215,11 @@ type Config struct {
// OverrideVerkle (TODO: remove after the fork)
OverrideVerkle *uint64 `toml:",omitempty"`
+ // ExperimentalBAL enables EIP-7928 block access list verification when
+ // executing post-cancun blocks that contain access lists, and access list
+ // construction on blocks that do not.
+ ExperimentalBAL bool `toml:",omitempty"`
+
// blob setting
BlobExtraReserve uint64
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go
index 70b5ef0bae..a190117caa 100644
--- a/eth/ethconfig/gen_config.go
+++ b/eth/ethconfig/gen_config.go
@@ -30,7 +30,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
BscDiscoveryURLs []string
NoPruning bool
NoPrefetch bool
- EnableBAL bool
DirectBroadcast bool
DisableSnapProtocol bool
RangeLimit bool
@@ -77,6 +76,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
OverrideOsaka *uint64 `toml:",omitempty"`
OverrideMendel *uint64 `toml:",omitempty"`
OverrideVerkle *uint64 `toml:",omitempty"`
+ ExperimentalBAL bool `toml:",omitempty"`
BlobExtraReserve uint64
EnableOpcodeOptimizing bool
EnableIncrSnapshots bool
@@ -100,7 +100,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.BscDiscoveryURLs = c.BscDiscoveryURLs
enc.NoPruning = c.NoPruning
enc.NoPrefetch = c.NoPrefetch
- enc.EnableBAL = c.EnableBAL
enc.DirectBroadcast = c.DirectBroadcast
enc.DisableSnapProtocol = c.DisableSnapProtocol
enc.RangeLimit = c.RangeLimit
@@ -147,6 +146,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.OverrideOsaka = c.OverrideOsaka
enc.OverrideMendel = c.OverrideMendel
enc.OverrideVerkle = c.OverrideVerkle
+ enc.ExperimentalBAL = c.ExperimentalBAL
enc.BlobExtraReserve = c.BlobExtraReserve
enc.EnableOpcodeOptimizing = c.EnableOpcodeOptimizing
enc.EnableIncrSnapshots = c.EnableIncrSnapshots
@@ -174,7 +174,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
BscDiscoveryURLs []string
NoPruning *bool
NoPrefetch *bool
- EnableBAL *bool
DirectBroadcast *bool
DisableSnapProtocol *bool
RangeLimit *bool
@@ -221,6 +220,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
OverrideOsaka *uint64 `toml:",omitempty"`
OverrideMendel *uint64 `toml:",omitempty"`
OverrideVerkle *uint64 `toml:",omitempty"`
+ ExperimentalBAL *bool `toml:",omitempty"`
BlobExtraReserve *uint64
EnableOpcodeOptimizing *bool
EnableIncrSnapshots *bool
@@ -271,9 +271,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.NoPrefetch != nil {
c.NoPrefetch = *dec.NoPrefetch
}
- if dec.EnableBAL != nil {
- c.EnableBAL = *dec.EnableBAL
- }
if dec.DirectBroadcast != nil {
c.DirectBroadcast = *dec.DirectBroadcast
}
@@ -412,6 +409,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.OverrideVerkle != nil {
c.OverrideVerkle = dec.OverrideVerkle
}
+ if dec.ExperimentalBAL != nil {
+ c.ExperimentalBAL = *dec.ExperimentalBAL
+ }
if dec.BlobExtraReserve != nil {
c.BlobExtraReserve = *dec.BlobExtraReserve
}
diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go
index ee909e9e66..2b9378fe65 100644
--- a/eth/fetcher/block_fetcher.go
+++ b/eth/fetcher/block_fetcher.go
@@ -889,7 +889,7 @@ func (f *BlockFetcher) importBlocks(op *blockOrHeaderInject) {
hash := block.Hash()
// Run the import on a new thread
- log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash, "balSize", block.BALSize())
+ log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
go func() {
// If the parent's unknown, abort insertion
parent := f.getBlock(block.ParentHash())
@@ -933,6 +933,7 @@ func (f *BlockFetcher) importBlocks(op *blockOrHeaderInject) {
log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
return
}
+
// If import succeeded, broadcast the block
blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
go f.broadcastBlock(block, false)
diff --git a/eth/handler.go b/eth/handler.go
index bb63d64bfa..d922f8b416 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -152,7 +152,6 @@ type handler struct {
networkID uint64
disablePeerTxBroadcast bool
enableEVNFeatures bool
- enableBAL bool
evnNodeIdsWhitelistMap map[enode.ID]struct{}
proxyedValidatorAddressMap map[common.Address]struct{}
proxyedNodeIdsMap map[enode.ID]struct{}
@@ -177,6 +176,8 @@ type handler struct {
txFetcher *fetcher.TxFetcher
peers *peerSet
+ enableBAL bool
+
eventMux *event.TypeMux
txsCh chan core.NewTxsEvent
txsSub event.Subscription
@@ -224,7 +225,6 @@ func newHandler(config *handlerConfig) (*handler, error) {
requiredBlocks: config.RequiredBlocks,
directBroadcast: config.DirectBroadcast,
enableEVNFeatures: config.EnableEVNFeatures,
- enableBAL: config.EnableBAL,
evnNodeIdsWhitelistMap: make(map[enode.ID]struct{}),
proxyedValidatorAddressMap: make(map[common.Address]struct{}),
proxyedNodeIdsMap: make(map[enode.ID]struct{}),
@@ -232,6 +232,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
handlerDoneCh: make(chan struct{}),
handlerStartCh: make(chan struct{}),
stopCh: make(chan struct{}),
+ enableBAL: config.EnableBAL,
}
for _, nodeID := range config.EVNNodeIdsWhitelist {
h.evnNodeIdsWhitelistMap[nodeID] = struct{}{}
@@ -349,7 +350,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
for i, item := range res {
block := types.NewBlockWithHeader(item.Header).WithBody(types.Body{Transactions: item.Txs, Uncles: item.Uncles})
block = block.WithSidecars(item.Sidecars)
- block = block.WithBAL(item.BAL)
+ block = block.WithAccessList(item.BlockAccessList)
block.ReceivedAt = time.Now()
block.ReceivedFrom = p.ID()
if err := block.SanityCheck(); err != nil {
@@ -472,10 +473,12 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
peer.Log().Error("Bsc extension barrier failed", "err", err)
return err
}
- if bscExt != nil && bscExt.Version() == bsc.Bsc3 {
- peer.CanHandleBAL.Store(true)
- log.Debug("runEthPeer", "bscExt.Version", bscExt.Version(), "CanHandleBAL", peer.CanHandleBAL.Load())
+
+ if bscExt != nil && bscExt.Version() == bsc.Bsc7928 {
+ peer.CanHandleBAL7928.Store(true)
+ log.Debug("runEthPeer", "bscExt.Version", bscExt.Version(), "CanHandleBAL7928", peer.CanHandleBAL7928.Load())
}
+
// Execute the Ethereum handshake
var (
head = h.chain.CurrentHeader()
@@ -844,7 +847,7 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) {
log.Debug("Broadcast block to peer",
"hash", hash, "peer", peer.ID(),
"EVNPeerFlag", peer.EVNPeerFlag.Load(),
- "CanHandleBAL", peer.CanHandleBAL.Load(),
+ "CanHandleBAL7928", peer.CanHandleBAL7928.Load(),
)
peer.AsyncSendNewBlock(block, td)
}
@@ -857,7 +860,7 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) {
log.Debug("Broadcast block to proxyed peer",
"hash", hash, "peer", peer.ID(),
"EVNPeerFlag", peer.EVNPeerFlag.Load(),
- "CanHandleBAL", peer.CanHandleBAL.Load(),
+ "CanHandleBAL7928", peer.CanHandleBAL7928.Load(),
)
peer.AsyncSendNewBlock(block, td)
proxyedPeersCnt++
@@ -873,7 +876,7 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) {
log.Debug("Broadcast block to EVN peer",
"hash", hash, "peer", peer.ID(),
"EVNPeerFlag", peer.EVNPeerFlag.Load(),
- "CanHandleBAL", peer.CanHandleBAL.Load(),
+ "CanHandleBAL7928", peer.CanHandleBAL7928.Load(),
)
peer.AsyncSendNewBlock(block, td)
evnPeersCnt++
@@ -894,8 +897,7 @@ func (h *handler) BroadcastBlock(block *types.Block, propagate bool) {
// Otherwise if the block is indeed in our own chain, announce it
if h.chain.HasBlock(hash, block.NumberU64()) {
for _, peer := range peers {
- log.Debug("Announced block to peer", "hash", hash, "peer", peer.ID(),
- "EVNPeerFlag", peer.EVNPeerFlag.Load(), "CanHandleBAL", peer.CanHandleBAL.Load())
+ log.Debug("Announced block to peer", "hash", hash, "peer", peer.ID(), "EVNPeerFlag", peer.EVNPeerFlag.Load(), "CanHandleBAL7928", peer.CanHandleBAL7928.Load())
peer.AsyncSendNewBlockHash(block)
}
log.Debug("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
diff --git a/eth/handler_eth.go b/eth/handler_eth.go
index d96b3b849c..33bda9ca52 100644
--- a/eth/handler_eth.go
+++ b/eth/handler_eth.go
@@ -141,8 +141,13 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, packet *eth.NewBlockPa
if sidecars != nil {
block = block.WithSidecars(sidecars)
}
- if packet.Bal != nil && h.chain.Engine().VerifyBAL(block, packet.Bal) == nil {
- block = block.WithBAL(packet.Bal)
+ if h.enableBAL && packet.BlockAccessList != nil {
+ if err := h.chain.Engine().VerifyBAL(block, packet.BlockAccessList); err == nil {
+ block = block.WithAccessList(packet.BlockAccessList)
+ log.Debug("block with BAL", "hash", block.Hash(), "number", block.Number(), "signData", common.Bytes2Hex(block.AccessList().SignData), "peer", peer.ID())
+ } else {
+ log.Error("invalid BAL", "block", block.Number(), "hash", block.Hash(), "peer", peer.ID(), "error", err)
+ }
}
// Schedule the block for import
diff --git a/eth/handler_test.go b/eth/handler_test.go
index 4cda82f5cd..9d635ac50a 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -273,13 +273,17 @@ func (c *mockParlia) Finalize(chain consensus.ChainHeaderReader, header *types.H
return
}
-func (c *mockParlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks) (*types.Block, []*types.Receipt, error) {
+func (c *mockParlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, body *types.Body, receipts []*types.Receipt, tracer *tracing.Hooks, onFinalize func()) (*types.Block, []*types.Receipt, error) {
// Finalize block
c.Finalize(chain, header, state, &body.Transactions, body.Uncles, body.Withdrawals, nil, nil, nil, tracer)
// Assign the final state root to header.
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
+ if onFinalize != nil {
+ onFinalize()
+ }
+
// Header seems complete, assemble into a block and return
return types.NewBlock(header, body, receipts, trie.NewStackTrie(nil)), receipts, nil
}
diff --git a/eth/protocols/bsc/handler.go b/eth/protocols/bsc/handler.go
index 994a362e3a..da548b20d2 100644
--- a/eth/protocols/bsc/handler.go
+++ b/eth/protocols/bsc/handler.go
@@ -165,15 +165,21 @@ func handleGetBlocksByRange(backend Backend, msg Decoder, peer *Peer) error {
if block == nil {
return fmt.Errorf("msg %v, cannot get start block: %v, %v", GetBlocksByRangeMsg, req.StartBlockHeight, req.StartBlockHash)
}
+ if !peer.CanHandleBAL7928.Load() && block.AccessList() != nil {
+ block = block.WithAccessList(nil) // remove the block access list
+ }
blocks = append(blocks, NewBlockData(block))
- balSize := block.BALSize()
+ balSize := block.AccessListSize()
for i := uint64(1); i < req.Count; i++ {
block = backend.Chain().GetBlockByHash(block.ParentHash())
if block == nil {
break
}
- balSize += block.BALSize()
+ if !peer.CanHandleBAL7928.Load() && block.AccessList() != nil {
+ block = block.WithAccessList(nil) // remove the block access list
+ }
blocks = append(blocks, NewBlockData(block))
+ balSize += block.AccessListSize()
}
log.Debug("reply GetBlocksByRange msg", "from", peer.id, "req", req.Count, "blocks", len(blocks), "balSize", balSize)
diff --git a/eth/protocols/bsc/protocol.go b/eth/protocols/bsc/protocol.go
index 572c24debb..cef7d672c2 100644
--- a/eth/protocols/bsc/protocol.go
+++ b/eth/protocols/bsc/protocol.go
@@ -10,9 +10,10 @@ import (
// Constants to match up protocol versions and messages
const (
- Bsc1 = 1
- Bsc2 = 2
- Bsc3 = 3 // to BAL process
+ Bsc1 = 1
+ Bsc2 = 2
+ Bsc3 = 3 // to BAL process
+ Bsc7928 = 4 // to EIP7928 process
)
// ProtocolName is the official short name of the `bsc` protocol used during
@@ -21,11 +22,11 @@ const ProtocolName = "bsc"
// ProtocolVersions are the supported versions of the `bsc` protocol (first
// is primary).
-var ProtocolVersions = []uint{Bsc1, Bsc2, Bsc3}
+var ProtocolVersions = []uint{Bsc1, Bsc2, Bsc3, Bsc7928}
// protocolLengths are the number of implemented message corresponding to
// different protocol versions.
-var protocolLengths = map[uint]uint64{Bsc1: 2, Bsc2: 4, Bsc3: 4}
+var protocolLengths = map[uint]uint64{Bsc1: 2, Bsc2: 4, Bsc3: 4, Bsc7928: 4}
// maxMessageSize is the maximum cap on the size of a protocol message.
const maxMessageSize = 10 * 1024 * 1024
@@ -82,23 +83,23 @@ func (*GetBlocksByRangePacket) Kind() byte { return GetBlocksByRangeMsg }
// BlockData contains types.extblock + sidecars
type BlockData struct {
- Header *types.Header
- Txs []*types.Transaction
- Uncles []*types.Header
- Withdrawals []*types.Withdrawal `rlp:"optional"`
- Sidecars types.BlobSidecars `rlp:"optional"`
- BAL *types.BlockAccessListEncode `rlp:"optional"`
+ Header *types.Header
+ Txs []*types.Transaction
+ Uncles []*types.Header
+ Withdrawals []*types.Withdrawal `rlp:"optional"`
+ Sidecars types.BlobSidecars `rlp:"optional"`
+ BlockAccessList *types.BlockAccessListEncode `rlp:"optional"`
}
// NewBlockData creates a new BlockData object from a block
func NewBlockData(block *types.Block) *BlockData {
return &BlockData{
- Header: block.Header(),
- Txs: block.Transactions(),
- Uncles: block.Uncles(),
- Withdrawals: block.Withdrawals(),
- Sidecars: block.Sidecars(),
- BAL: block.BAL(),
+ Header: block.Header(),
+ Txs: block.Transactions(),
+ Uncles: block.Uncles(),
+ Withdrawals: block.Withdrawals(),
+ Sidecars: block.Sidecars(),
+ BlockAccessList: block.AccessList(),
}
}
diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go
index 81c3501cdd..8fb4e17d5a 100644
--- a/eth/protocols/eth/handlers.go
+++ b/eth/protocols/eth/handlers.go
@@ -376,13 +376,6 @@ func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
- if ann.Bal != nil {
- log.Debug("handleNewBlock, BAL", "number", ann.Block.NumberU64(), "hash", ann.Block.Hash(), "peer", peer.ID(),
- "version", ann.Bal.Version, "signData", len(ann.Bal.SignData), "accounts", len(ann.Bal.Accounts), "balSize", ann.Block.BALSize())
- } else {
- log.Debug("handleNewBlock, no BAL", "number", ann.Block.NumberU64(), "hash", ann.Block.Hash(), "peer", peer.ID(),
- "txNum", len(ann.Block.Transactions()), "balSize", ann.Block.BALSize())
- }
// Now that we have our packet, perform operations using the interface methods
if err := ann.sanityCheck(); err != nil {
return err
diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go
index d7ef06b265..5f753820d2 100644
--- a/eth/protocols/eth/peer.go
+++ b/eth/protocols/eth/peer.go
@@ -312,23 +312,22 @@ func (p *Peer) AsyncSendNewBlockHash(block *types.Block) {
func (p *Peer) SendNewBlock(block *types.Block, td *big.Int) error {
// Mark all the block hash as known, but ensure we don't overflow our limits
p.knownBlocks.Add(block.Hash())
- bal := block.BAL()
- if !p.CanHandleBAL.Load() {
- bal = nil
+ accessList := block.AccessList()
+ if !p.CanHandleBAL7928.Load() {
+ accessList = nil
}
- if bal != nil {
+ if accessList != nil {
log.Debug("SendNewBlock", "number", block.NumberU64(), "hash", block.Hash(), "peer", p.ID(),
- "balSize", block.BALSize(), "version", bal.Version, "canHandleBAL", p.CanHandleBAL.Load())
+ "balSize", block.AccessListSize(), "version", accessList.Version, "CanHandleBAL7928", p.CanHandleBAL7928.Load())
} else {
- log.Debug("SendNewBlock no BAL", "number", block.NumberU64(), "hash", block.Hash(), "peer", p.ID(),
- "txNum", len(block.Transactions()), "canHandleBAL", p.CanHandleBAL.Load())
+ log.Debug("SendNewBlock no block access list", "number", block.NumberU64(), "hash", block.Hash(), "peer", p.ID(),
+ "txNum", len(block.Transactions()), "CanHandleBAL7928", p.CanHandleBAL7928.Load())
}
-
return p2p.Send(p.rw, NewBlockMsg, &NewBlockPacket{
- Block: block,
- TD: td,
- Sidecars: block.Sidecars(),
- Bal: bal,
+ Block: block,
+ TD: td,
+ Sidecars: block.Sidecars(),
+ BlockAccessList: accessList,
})
}
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go
index 60d8a2f6b9..7d8436da8f 100644
--- a/eth/protocols/eth/protocol.go
+++ b/eth/protocols/eth/protocol.go
@@ -231,10 +231,10 @@ type BlockHeadersRLPPacket struct {
// NewBlockPacket is the network packet for the block propagation message.
type NewBlockPacket struct {
- Block *types.Block
- TD *big.Int
- Sidecars types.BlobSidecars `rlp:"optional"`
- Bal *types.BlockAccessListEncode `rlp:"optional"`
+ Block *types.Block
+ TD *big.Int
+ Sidecars types.BlobSidecars `rlp:"optional"`
+ BlockAccessList *types.BlockAccessListEncode `rlp:"optional"`
}
// sanityCheck verifies that the values are reasonable, as a DoS protection
@@ -242,7 +242,7 @@ func (request *NewBlockPacket) sanityCheck() error {
if err := request.Block.SanityCheck(); err != nil {
return err
}
- //TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times
+ // TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times
// larger, it will still fit within 100 bits
if tdlen := request.TD.BitLen(); tdlen > 100 {
return fmt.Errorf("too large block TD: bitlen %d", tdlen)
diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go
index fbb891538e..56a94796f7 100644
--- a/eth/tracers/js/tracer_test.go
+++ b/eth/tracers/js/tracer_test.go
@@ -40,6 +40,9 @@ type dummyStatedb struct {
func (*dummyStatedb) GetRefund() uint64 { return 1337 }
func (*dummyStatedb) GetBalance(addr common.Address) *uint256.Int { return new(uint256.Int) }
+func (*dummyStatedb) GetStateAndCommittedState(_ common.Address, _ common.Hash) (common.Hash, common.Hash) {
+ return common.Hash{}, common.Hash{}
+}
type vmContext struct {
blockCtx vm.BlockContext
diff --git a/eth/tracers/logger/logger_test.go b/eth/tracers/logger/logger_test.go
index dd6927b9ed..52e3270323 100644
--- a/eth/tracers/logger/logger_test.go
+++ b/eth/tracers/logger/logger_test.go
@@ -38,6 +38,9 @@ func (*dummyStatedb) GetState(_ common.Address, _ common.Hash) common.Hash { ret
func (*dummyStatedb) SetState(_ common.Address, _ common.Hash, _ common.Hash) common.Hash {
return common.Hash{}
}
+func (*dummyStatedb) GetStateAndCommittedState(_ common.Address, _ common.Hash) (common.Hash, common.Hash) {
+ return common.Hash{}, common.Hash{}
+}
func TestStoreCapture(t *testing.T) {
var (
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index c4d8254f0b..597ba23028 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -161,10 +161,11 @@ func (ec *Client) BlobSidecarByTxHash(ctx context.Context, hash common.Hash) (*t
}
type rpcBlock struct {
- Hash *common.Hash `json:"hash"`
- Transactions []rpcTransaction `json:"transactions"`
- UncleHashes []common.Hash `json:"uncles"`
- Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
+ Hash *common.Hash `json:"hash"`
+ Transactions []rpcTransaction `json:"transactions"`
+ UncleHashes []common.Hash `json:"uncles"`
+ Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
+ AccessList *types.BlockAccessListEncode `json:"accessList,omitempty"`
}
func (ec *Client) getBlock(ctx context.Context, method string, args ...interface{}) (*types.Block, error) {
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index d54f34a839..60b9df5bdf 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -830,7 +830,7 @@ func (diff *StateOverride) Apply(statedb *state.StateDB, precompiles vm.Precompi
}
// Override account(contract) code.
if account.Code != nil {
- statedb.SetCode(addr, *account.Code)
+ statedb.SetCode(addr, *account.Code, tracing.CodeChangeUnspecified)
}
// Override account balance.
if account.Balance != nil {
@@ -1271,6 +1271,9 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param
if block.Withdrawals() != nil {
fields["withdrawals"] = block.Withdrawals()
}
+ if block.AccessList() != nil {
+ fields["accessList"] = block.AccessList()
+ }
return fields
}
diff --git a/internal/ethapi/override/override.go b/internal/ethapi/override/override.go
index 0bcf3c444d..9d57a78651 100644
--- a/internal/ethapi/override/override.go
+++ b/internal/ethapi/override/override.go
@@ -91,7 +91,7 @@ func (diff *StateOverride) Apply(statedb *state.StateDB, precompiles vm.Precompi
}
// Override account(contract) code.
if account.Code != nil {
- statedb.SetCode(addr, *account.Code)
+ statedb.SetCode(addr, *account.Code, tracing.CodeChangeUnspecified)
}
// Override account balance.
if account.Balance != nil {
diff --git a/internal/ethapi/simulate.go b/internal/ethapi/simulate.go
index 5c99aaad25..44b01cde72 100644
--- a/internal/ethapi/simulate.go
+++ b/internal/ethapi/simulate.go
@@ -372,7 +372,7 @@ func (sim *simulator) processBlock(ctx context.Context, block *simBlock, header,
}
blockBody := &types.Body{Transactions: txes, Withdrawals: *block.BlockOverrides.Withdrawals}
chainHeadReader := &simChainHeadReader{ctx, sim.b}
- b, err := sim.FinalizeAndAssemble(chainHeadReader, header, sim.state, blockBody, receipts)
+ b, _, err := sim.b.Engine().FinalizeAndAssemble(chainHeadReader, header, sim.state, blockBody, receipts, nil, nil)
if err != nil {
return nil, nil, nil, err
}
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index 03fb2a4680..5bea02d1f3 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -454,6 +454,16 @@ web3._extend({
call: 'debug_getTrieFlushInterval',
params: 0
}),
+ new web3._extend.Method({
+ name: 'getBlockAccessList',
+ call: 'debug_getBlockAccessList',
+ params: 1
+ }),
+ new web3._extend.Method({
+ name: 'getEncodedBlockAccessList',
+ call: 'debug_getEncodedBlockAccessList',
+ params: 1
+ }),
],
properties: []
});
diff --git a/miner/worker.go b/miner/worker.go
index e0f6c3c41f..dadf267300 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -26,6 +26,7 @@ import (
"time"
mapset "github.com/deckarep/golang-set/v2"
+ "github.com/ethereum/go-ethereum/core/tracing"
"github.com/holiman/uint256"
"github.com/ethereum/go-ethereum/common"
@@ -110,6 +111,8 @@ type environment struct {
witness *stateless.Witness
committed bool
+ alTracer *core.BlockAccessListTracer
+ vmConfig vm.Config
}
// discard terminates the background prefetcher go-routine. It should
@@ -608,13 +611,6 @@ func (w *worker) resultLoop() {
w.recentMinedBlocks.Add(block.NumberU64(), []common.Hash{block.ParentHash()})
}
- // add BAL to the block
- bal := task.state.GetEncodedBlockAccessList(block)
- if bal != nil && w.engine.SignBAL(bal) == nil {
- block = block.WithBAL(bal)
- }
- task.state.DumpAccessList(block)
-
// Commit block and state to database.
start := time.Now()
status, err := w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, w.mux)
@@ -630,8 +626,14 @@ func (w *worker) resultLoop() {
stats := w.chain.GetBlockStats(block.Hash())
stats.SendBlockTime.Store(time.Now().UnixMilli())
stats.StartMiningTime.Store(task.miningStartAt.UnixMilli())
- log.Info("Successfully seal and write new block", "number", block.Number(), "hash", hash, "time", block.Header().MilliTimestamp(), "sealhash", sealhash,
- "block size(noBal)", block.Size(), "balSize", block.BALSize(), "elapsed", common.PrettyDuration(time.Since(task.createdAt)))
+ if w.chainConfig.IsCancun(block.Number(), block.Header().Time) && w.chainConfig.EnableBAL {
+ log.Info("Successfully seal and write new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
+ "accessListSize", block.AccessListSize(), "signData", common.Bytes2Hex(block.AccessList().SignData),
+ "elapsed", common.PrettyDuration(time.Since(task.createdAt)))
+ } else {
+ log.Info("Successfully seal and write new block", "number", block.Number(), "sealhash", sealhash, "hash", hash,
+ "elapsed", common.PrettyDuration(time.Since(task.createdAt)))
+ }
w.mux.Post(core.NewMinedBlockEvent{Block: block})
case <-w.exitCh:
@@ -645,7 +647,7 @@ func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase co
prevEnv *environment, witness bool) (*environment, error) {
// Retrieve the parent state to execute on top and start a prefetcher for
// the miner to speed block sealing up a bit
- state, err := w.chain.StateWithCacheAt(parent.Root)
+ sdb, err := w.chain.StateWithCacheAt(parent.Root)
if err != nil {
return nil, err
}
@@ -654,23 +656,34 @@ func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase co
if err != nil {
return nil, err
}
- state.StartPrefetcher("miner", bundle)
+ sdb.StartPrefetcher("miner", bundle)
} else {
if prevEnv == nil {
- state.StartPrefetcher("miner", nil)
+ sdb.StartPrefetcher("miner", nil)
} else {
- state.TransferPrefetcher(prevEnv.state)
+ sdb.TransferPrefetcher(prevEnv.state)
}
}
+ var alTracer *core.BlockAccessListTracer
+ var hooks *tracing.Hooks
+ var hookedState vm.StateDB = sdb
+ var vmConfig vm.Config
+ if w.chainConfig.IsEnableBAL() {
+ alTracer, hooks = core.NewBlockAccessListTracer()
+ hookedState = state.NewHookedState(sdb, hooks)
+ vmConfig.Tracer = hooks
+ }
// Note the passed coinbase may be different with header.Coinbase.
env := &environment{
signer: types.MakeSigner(w.chainConfig, header.Number, header.Time),
- state: state,
+ state: sdb,
coinbase: coinbase,
header: header,
- witness: state.Witness(),
- evm: vm.NewEVM(core.NewEVMBlockContext(header, w.chain, &coinbase), state, w.chainConfig, vm.Config{}),
+ witness: sdb.Witness(),
+ evm: vm.NewEVM(core.NewEVMBlockContext(header, w.chain, &coinbase), hookedState, w.chainConfig, vmConfig),
+ alTracer: alTracer,
+ vmConfig: vmConfig,
}
// Keep track of transactions which return errors so they can be removed
env.tcount = 0
@@ -760,7 +773,9 @@ func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transac
tx := txsPrefetch.PeekWithUnwrap()
if tx != nil {
txCurr := &tx
- w.prefetcher.PrefetchMining(txsPrefetch, env.header, env.gasPool.Gas(), env.state.StateForPrefetch(), *w.chain.GetVMConfig(), stopPrefetchCh, txCurr)
+ // PrefetchMining only warms up the state cache, it doesn't need tracer for state tracking.
+ // Using empty vm.Config avoids sharing journal state across concurrent goroutines.
+ w.prefetcher.PrefetchMining(txsPrefetch, env.header, env.gasPool.Gas(), env.state.StateForPrefetch(), vm.Config{}, stopPrefetchCh, txCurr)
}
signal := commitInterruptNone
@@ -1010,7 +1025,7 @@ func (w *worker) prepareWork(genParams *generateParams, witness bool) (*environm
}
// Handle upgrade built-in system contract code
- systemcontracts.TryUpdateBuildInSystemContract(w.chainConfig, header.Number, parent.Time, header.Time, env.state, true)
+ systemcontracts.TryUpdateBuildInSystemContract(w.chainConfig, header.Number, parent.Time, header.Time, env.evm.StateDB, true)
if header.ParentBeaconRoot != nil {
core.ProcessBeaconBlockRoot(*header.ParentBeaconRoot, env.evm)
@@ -1019,6 +1034,12 @@ func (w *worker) prepareWork(genParams *generateParams, witness bool) (*environm
if w.chainConfig.IsPrague(header.Number, header.Time) {
core.ProcessParentBlockHash(header.ParentHash, env.evm)
}
+
+ if w.chainConfig.IsEnableBAL() && env.alTracer != nil {
+ env.alTracer.OnPreTxExecutionDone()
+ log.Debug("Marked pre-tx operations done for BAL", "number", header.Number)
+ }
+
return env, nil
}
@@ -1129,6 +1150,7 @@ func (w *worker) generateWork(params *generateParams, witness bool) *newPayloadR
}
}
body := types.Body{Transactions: work.txs, Withdrawals: params.withdrawals}
+
allLogs := make([]*types.Log, 0)
for _, r := range work.receipts {
allLogs = append(allLogs, r.Logs...)
@@ -1156,7 +1178,7 @@ func (w *worker) generateWork(params *generateParams, witness bool) *newPayloadR
}
fees := work.state.GetBalance(consensus.SystemAddress)
- block, receipts, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, &body, work.receipts, nil)
+ block, receipts, err := w.engine.FinalizeAndAssemble(w.chain, work.header, work.state, &body, work.receipts, nil, nil)
if err != nil {
return &newPayloadResult{err: err}
}
@@ -1451,11 +1473,24 @@ func (w *worker) commit(env *environment, interval func(), start time.Time) erro
feesInEther := new(big.Float).Quo(new(big.Float).SetInt(fees), big.NewFloat(params.Ether))
// Withdrawals are set to nil here, because this is only called in PoW.
finalizeStart := time.Now()
+ var accessList *types.BlockAccessListEncode
+ onBlockFinalization := func() {
+ if w.chainConfig.IsCancun(env.header.Number, env.header.Time) && w.chainConfig.EnableBAL && env.alTracer != nil {
+ env.alTracer.OnBlockFinalization()
+ accessList = env.alTracer.AccessListEncoded(env.header.Number.Uint64(), env.header.Hash())
+ }
+ }
+
body := types.Body{Transactions: env.txs}
if env.header.EmptyWithdrawalsHash() {
body.Withdrawals = make([]*types.Withdrawal, 0)
}
- block, receipts, err := w.engine.FinalizeAndAssemble(w.chain, types.CopyHeader(env.header), env.state, &body, env.receipts, nil)
+ block, receipts, err := w.engine.FinalizeAndAssemble(w.chain, types.CopyHeader(env.header), env.state, &body, env.receipts, env.vmConfig.Tracer, onBlockFinalization)
+
+ if w.chainConfig.IsCancun(env.header.Number, env.header.Time) && w.chainConfig.EnableBAL && w.engine.SignBAL(accessList) == nil {
+ block = block.WithAccessList(accessList)
+ }
+
env.committed = true
if err != nil {
return err
diff --git a/node/config.go b/node/config.go
index 8e18fc7d62..bc8679756a 100644
--- a/node/config.go
+++ b/node/config.go
@@ -104,9 +104,6 @@ type Config struct {
// EnableQuickBlockFetching indicates whether to fetch new blocks using new messages.
EnableQuickBlockFetching bool `toml:",omitempty"`
- // EnableBAL enables the block access list feature
- EnableBAL bool `toml:",omitempty"`
-
// RangeLimit enable 5000 blocks limit when handle range query
RangeLimit bool `toml:",omitempty"`
diff --git a/p2p/peer.go b/p2p/peer.go
index 1f781fe0c3..02ff555bea 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -134,7 +134,7 @@ type Peer struct {
ProxyedPeerFlag atomic.Bool
// it indicates the peer can handle BAL(block access list) packet
- CanHandleBAL atomic.Bool
+ CanHandleBAL7928 atomic.Bool
}
// NewPeer returns a peer for testing purposes.
diff --git a/params/config.go b/params/config.go
index ed6512ece5..2d149b444a 100644
--- a/params/config.go
+++ b/params/config.go
@@ -684,6 +684,10 @@ type ChainConfig struct {
// those cases.
EnableVerkleAtGenesis bool `json:"enableVerkleAtGenesis,omitempty"`
+ // EnableBAL is a flag that specifies whether the node generate
+ // bal in block.
+ EnableBAL bool `json:"enableBAL,omitempty"`
+
RamanujanBlock *big.Int `json:"ramanujanBlock,omitempty"` // ramanujanBlock switch block (nil = no fork, 0 = already activated)
NielsBlock *big.Int `json:"nielsBlock,omitempty"` // nielsBlock switch block (nil = no fork, 0 = already activated)
MirrorSyncBlock *big.Int `json:"mirrorSyncBlock,omitempty"` // mirrorSyncBlock switch block (nil = no fork, 0 = already activated)
@@ -1344,6 +1348,10 @@ func (c *ChainConfig) IsVerkleGenesis() bool {
return c.EnableVerkleAtGenesis
}
+func (c *ChainConfig) IsEnableBAL() bool {
+ return c.EnableBAL
+}
+
// IsEIP4762 returns whether eip 4762 has been activated at given block.
func (c *ChainConfig) IsEIP4762(num *big.Int, time uint64) bool {
return c.IsVerkle(num, time)
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 4fb1caddb3..92a8f4e4be 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -514,7 +514,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo
sdb := state.NewDatabase(triedb, nil)
statedb, _ := state.New(types.EmptyRootHash, sdb)
for addr, a := range accounts {
- statedb.SetCode(addr, a.Code)
+ statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified)
statedb.SetNonce(addr, a.Nonce, tracing.NonceChangeUnspecified)
statedb.SetBalance(addr, uint256.MustFromBig(a.Balance), tracing.BalanceChangeUnspecified)
for k, v := range a.Storage {
diff --git a/trie/dummy_trie.go b/trie/dummy_trie.go
index 7b7e04c95e..ebde4dc179 100644
--- a/trie/dummy_trie.go
+++ b/trie/dummy_trie.go
@@ -47,6 +47,14 @@ func (t *EmptyTrie) UpdateStorage(_ common.Address, key, value []byte) error {
return nil
}
+func (t *EmptyTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
+ return nil
+}
+
+func (t *EmptyTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
+ return nil
+}
+
// UpdateAccount abstract an account write in the trie.
func (t *EmptyTrie) UpdateAccount(address common.Address, account *types.StateAccount, codeLen int) error {
return nil
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index 967b9e603b..cf65ff550a 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -190,6 +190,29 @@ func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error {
return nil
}
+// UpdateStorageBatch attempts to update a list storages in the batch manner.
+func (t *StateTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
+ var (
+ hkeys = make([][]byte, 0, len(keys))
+ evals = make([][]byte, 0, len(values))
+ )
+ for _, key := range keys {
+ hk := crypto.Keccak256(key)
+ if t.preimages != nil {
+ t.secKeyCache[common.Hash(hk)] = key
+ }
+ hkeys = append(hkeys, hk)
+ }
+ for _, val := range values {
+ data, err := rlp.EncodeToBytes(val)
+ if err != nil {
+ return err
+ }
+ evals = append(evals, data)
+ }
+ return t.trie.UpdateBatch(hkeys, evals)
+}
+
// UpdateAccount will abstract the write of an account to the secure trie.
func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccount, _ int) error {
hk := crypto.Keccak256(address.Bytes())
@@ -206,6 +229,29 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun
return nil
}
+// UpdateAccountBatch attempts to update a list accounts in the batch manner.
+func (t *StateTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
+ var (
+ hkeys = make([][]byte, 0, len(addresses))
+ values = make([][]byte, 0, len(accounts))
+ )
+ for _, addr := range addresses {
+ hk := crypto.Keccak256(addr.Bytes())
+ if t.preimages != nil {
+ t.secKeyCache[common.Hash(hk)] = addr.Bytes()
+ }
+ hkeys = append(hkeys, hk)
+ }
+ for _, acc := range accounts {
+ data, err := rlp.EncodeToBytes(acc)
+ if err != nil {
+ return err
+ }
+ values = append(values, data)
+ }
+ return t.trie.UpdateBatch(hkeys, values)
+}
+
func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
return nil
}
diff --git a/trie/tracer.go b/trie/tracer.go
index 90b9666f0b..4c8aee400d 100644
--- a/trie/tracer.go
+++ b/trie/tracer.go
@@ -18,6 +18,7 @@ package trie
import (
"maps"
+ "sync"
"github.com/ethereum/go-ethereum/common"
)
@@ -45,6 +46,7 @@ type tracer struct {
inserts map[string]struct{}
deletes map[string]struct{}
accessList map[string][]byte
+ rwlock sync.RWMutex
}
// newTracer initializes the tracer for capturing trie changes.
@@ -53,6 +55,7 @@ func newTracer() *tracer {
inserts: make(map[string]struct{}),
deletes: make(map[string]struct{}),
accessList: make(map[string][]byte),
+ rwlock: sync.RWMutex{},
}
}
@@ -60,6 +63,8 @@ func newTracer() *tracer {
// blob internally. Don't change the value outside of function since
// it's not deep-copied.
func (t *tracer) onRead(path []byte, val []byte) {
+ t.rwlock.Lock()
+ defer t.rwlock.Unlock()
t.accessList[string(path)] = val
}
@@ -67,6 +72,8 @@ func (t *tracer) onRead(path []byte, val []byte) {
// in the deletion set (resurrected node), then just wipe it from
// the deletion set as it's "untouched".
func (t *tracer) onInsert(path []byte) {
+ t.rwlock.Lock()
+ defer t.rwlock.Unlock()
if _, present := t.deletes[string(path)]; present {
delete(t.deletes, string(path))
return
@@ -78,6 +85,8 @@ func (t *tracer) onInsert(path []byte) {
// in the addition set, then just wipe it from the addition set
// as it's untouched.
func (t *tracer) onDelete(path []byte) {
+ t.rwlock.Lock()
+ defer t.rwlock.Unlock()
if _, present := t.inserts[string(path)]; present {
delete(t.inserts, string(path))
return
@@ -87,6 +96,8 @@ func (t *tracer) onDelete(path []byte) {
// reset clears the content tracked by tracer.
func (t *tracer) reset() {
+ t.rwlock.Lock()
+ defer t.rwlock.Unlock()
t.inserts = make(map[string]struct{})
t.deletes = make(map[string]struct{})
t.accessList = make(map[string][]byte)
@@ -94,6 +105,8 @@ func (t *tracer) reset() {
// copy returns a deep copied tracer instance.
func (t *tracer) copy() *tracer {
+ t.rwlock.RLock()
+ defer t.rwlock.RUnlock()
accessList := make(map[string][]byte, len(t.accessList))
for path, blob := range t.accessList {
accessList[path] = common.CopyBytes(blob)
@@ -107,6 +120,8 @@ func (t *tracer) copy() *tracer {
// deletedNodes returns a list of node paths which are deleted from the trie.
func (t *tracer) deletedNodes() []string {
+ t.rwlock.Lock()
+ defer t.rwlock.Unlock()
var paths []string
for path := range t.deletes {
// It's possible a few deleted nodes were embedded
diff --git a/trie/transition.go b/trie/transition.go
new file mode 100644
index 0000000000..67c05e8c10
--- /dev/null
+++ b/trie/transition.go
@@ -0,0 +1,235 @@
+// Copyright 2025 go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package trie
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-verkle"
+)
+
+// TransitionTrie is a trie that implements a façade design pattern, presenting
+// a single interface to the old MPT trie and the new verkle/binary trie. Reads
+// first from the overlay trie, and falls back to the base trie if the key isn't
+// found. All writes go to the overlay trie.
+type TransitionTrie struct {
+ overlay *VerkleTrie
+ base *SecureTrie
+ storage bool
+}
+
+// NewTransitionTrie creates a new TransitionTrie.
+func NewTransitionTrie(base *SecureTrie, overlay *VerkleTrie, st bool) *TransitionTrie {
+ return &TransitionTrie{
+ overlay: overlay,
+ base: base,
+ storage: st,
+ }
+}
+
+func (t *TransitionTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, _ []int) error {
+ panic("not implemented")
+}
+
+func (t *TransitionTrie) UpdateStorageBatch(_ common.Address, keys [][]byte, values [][]byte) error {
+ panic("not implemented")
+}
+
+// Base returns the base trie.
+func (t *TransitionTrie) Base() *SecureTrie {
+ return t.base
+}
+
+// Overlay returns the overlay trie.
+func (t *TransitionTrie) Overlay() *VerkleTrie {
+ return t.overlay
+}
+
+// GetKey returns the sha3 preimage of a hashed key that was previously used
+// to store a value.
+func (t *TransitionTrie) GetKey(key []byte) []byte {
+ if key := t.overlay.GetKey(key); key != nil {
+ return key
+ }
+ return t.base.GetKey(key)
+}
+
+// GetStorage returns the value for key stored in the trie. The value bytes must
+// not be modified by the caller.
+func (t *TransitionTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) {
+ val, err := t.overlay.GetStorage(addr, key)
+ if err != nil {
+ return nil, fmt.Errorf("get storage from overlay: %s", err)
+ }
+ if len(val) != 0 {
+ return val, nil
+ }
+ // TODO also insert value into overlay
+ return t.base.GetStorage(addr, key)
+}
+
+// PrefetchStorage attempts to resolve specific storage slots from the database
+// to accelerate subsequent trie operations.
+func (t *TransitionTrie) PrefetchStorage(addr common.Address, keys [][]byte) error {
+ for _, key := range keys {
+ if _, err := t.GetStorage(addr, key); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetAccount abstract an account read from the trie.
+func (t *TransitionTrie) GetAccount(address common.Address) (*types.StateAccount, error) {
+ data, err := t.overlay.GetAccount(address)
+ if err != nil {
+ // Post cancun, no indicator needs to be used to indicate that
+ // an account was deleted in the overlay tree. If an error is
+ // returned, then it's a genuine error, and not an indicator
+ // that a tombstone was found.
+ return nil, err
+ }
+ if data != nil {
+ return data, nil
+ }
+ return t.base.GetAccount(address)
+}
+
+// PrefetchAccount attempts to resolve specific accounts from the database
+// to accelerate subsequent trie operations.
+func (t *TransitionTrie) PrefetchAccount(addresses []common.Address) error {
+ for _, addr := range addresses {
+ if _, err := t.GetAccount(addr); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// UpdateStorage associates key with value in the trie. If value has length zero, any
+// existing value is deleted from the trie. The value bytes must not be modified
+// by the caller while they are stored in the trie.
+func (t *TransitionTrie) UpdateStorage(address common.Address, key []byte, value []byte) error {
+ var v []byte
+ if len(value) >= 32 {
+ v = value[:32]
+ } else {
+ var val [32]byte
+ copy(val[32-len(value):], value[:])
+ v = val[:]
+ }
+ return t.overlay.UpdateStorage(address, key, v)
+}
+
+// UpdateAccount abstract an account write to the trie.
+func (t *TransitionTrie) UpdateAccount(addr common.Address, account *types.StateAccount, codeLen int) error {
+ // NOTE: before the rebase, this was saving the state root, so that OpenStorageTrie
+ // could still work during a replay. This is no longer needed, as OpenStorageTrie
+ // only needs to know what the account trie does now.
+ return t.overlay.UpdateAccount(addr, account, codeLen)
+}
+
+// DeleteStorage removes any existing value for key from the trie. If a node was not
+// found in the database, a trie.MissingNodeError is returned.
+func (t *TransitionTrie) DeleteStorage(addr common.Address, key []byte) error {
+ return t.overlay.DeleteStorage(addr, key)
+}
+
+// DeleteAccount abstracts an account deletion from the trie.
+func (t *TransitionTrie) DeleteAccount(key common.Address) error {
+ return t.overlay.DeleteAccount(key)
+}
+
+// Hash returns the root hash of the trie. It does not write to the database and
+// can be used even if the trie doesn't have one.
+func (t *TransitionTrie) Hash() common.Hash {
+ return t.overlay.Hash()
+}
+
+// Commit collects all dirty nodes in the trie and replace them with the
+// corresponding node hash. All collected nodes(including dirty leaves if
+// collectLeaf is true) will be encapsulated into a nodeset for return.
+// The returned nodeset can be nil if the trie is clean(nothing to commit).
+// Once the trie is committed, it's not usable anymore. A new trie must
+// be created with new root and updated trie database for following usage
+func (t *TransitionTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) {
+ // Just return if the trie is a storage trie: otherwise,
+ // the overlay trie will be committed as many times as
+ // there are storage tries. This would kill performance.
+ if t.storage {
+ return common.Hash{}, nil
+ }
+ return t.overlay.Commit(collectLeaf)
+}
+
+// NodeIterator returns an iterator that returns nodes of the trie. Iteration
+// starts at the key after the given start key.
+func (t *TransitionTrie) NodeIterator(startKey []byte) (NodeIterator, error) {
+ panic("not implemented") // TODO: Implement
+}
+
+// Prove constructs a Merkle proof for key. The result contains all encoded nodes
+// on the path to the value at key. The value itself is also included in the last
+// node and can be retrieved by verifying the proof.
+//
+// If the trie does not contain a value for key, the returned proof contains all
+// nodes of the longest existing prefix of the key (at least the root), ending
+// with the node that proves the absence of the key.
+func (t *TransitionTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
+ panic("not implemented") // TODO: Implement
+}
+
+// IsVerkle returns true if the trie is verkle-tree based
+func (t *TransitionTrie) IsVerkle() bool {
+ // For all intents and purposes, the calling code should treat this as a verkle trie
+ return true
+}
+
+// UpdateStem updates a group of values, given the stem they are using. If
+// a value already exists, it is overwritten.
+func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error {
+ trie := t.overlay
+ switch root := trie.root.(type) {
+ case *verkle.InternalNode:
+ return root.InsertValuesAtStem(key, values, t.overlay.nodeResolver)
+ default:
+ panic("invalid root type")
+ }
+}
+
+// Copy creates a deep copy of the transition trie.
+func (t *TransitionTrie) Copy() *TransitionTrie {
+ return &TransitionTrie{
+ overlay: t.overlay.Copy(),
+ base: t.base.Copy(),
+ storage: t.storage,
+ }
+}
+
+// UpdateContractCode updates the contract code for the given address.
+func (t *TransitionTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error {
+ return t.overlay.UpdateContractCode(addr, codeHash, code)
+}
+
+// Witness returns a set containing all trie nodes that have been accessed.
+func (t *TransitionTrie) Witness() map[string][]byte {
+ panic("not implemented")
+}
diff --git a/trie/trie.go b/trie/trie.go
index a8072d9b53..0f601fb00b 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/triedb/database"
+ "golang.org/x/sync/errgroup"
)
// Trie represents a Merkle Patricia Trie. Use New to create a trie that operates
@@ -402,6 +403,72 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error
}
}
+// UpdateBatch updates a batch of entries concurrently.
+func (t *Trie) UpdateBatch(keys [][]byte, values [][]byte) error {
+ // Short circuit if the trie is already committed and unusable.
+ if t.committed {
+ return ErrCommitted
+ }
+ if len(keys) != len(values) {
+ return fmt.Errorf("keys and values length mismatch: %d != %d", len(keys), len(values))
+ }
+ // Insert the entries sequentially if there are not too many
+ // trie nodes in the trie.
+ fn, ok := t.root.(*fullNode)
+
+ if !ok || len(keys) < 4 { // TODO(rjl493456442) the parallelism threshold should be twisted
+ for i, key := range keys {
+ err := t.Update(key, values[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ var (
+ ikeys = make(map[byte][][]byte)
+ ivals = make(map[byte][][]byte)
+ eg errgroup.Group
+ )
+ for i, key := range keys {
+ hkey := keybytesToHex(key)
+ ikeys[hkey[0]] = append(ikeys[hkey[0]], hkey)
+ ivals[hkey[0]] = append(ivals[hkey[0]], values[i])
+ }
+ if len(keys) > 0 {
+ fn.flags = t.newFlag()
+ }
+ for p, k := range ikeys {
+ pos := p
+ ks := k
+ eg.Go(func() error {
+ vs := ivals[pos]
+ for i, k := range ks {
+ if len(vs[i]) != 0 {
+ _, n, err := t.insert(fn.Children[pos], []byte{pos}, k[1:], valueNode(vs[i]))
+ if err != nil {
+ return err
+ }
+ fn.Children[pos] = n
+ } else {
+ _, n, err := t.delete(fn.Children[pos], []byte{pos}, k[1:])
+ if err != nil {
+ return err
+ }
+ fn.Children[pos] = n
+ }
+ }
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return err
+ }
+ t.unhashed += len(keys)
+ t.uncommitted += len(keys)
+ return nil
+}
+
// MustDelete is a wrapper of Delete and will omit any encountered error but
// just print out an error message.
func (t *Trie) MustDelete(key []byte) {
diff --git a/trie/trie_test.go b/trie/trie_test.go
index b806ae6b0c..5b99badff1 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -449,9 +449,9 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
if !ok || n.IsDeleted() {
return errors.New("expect new node")
}
- //if len(n.Prev) > 0 {
+ // if len(n.Prev) > 0 {
// return errors.New("unexpected origin value")
- //}
+ // }
}
// Check deletion set
for path := range deletes {
@@ -459,12 +459,12 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
if !ok || !n.IsDeleted() {
return errors.New("expect deleted node")
}
- //if len(n.Prev) == 0 {
+ // if len(n.Prev) == 0 {
// return errors.New("expect origin value")
- //}
- //if !bytes.Equal(n.Prev, blob) {
+ // }
+ // if !bytes.Equal(n.Prev, blob) {
// return errors.New("invalid origin value")
- //}
+ // }
}
// Check update set
for path := range updates {
@@ -472,12 +472,12 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error {
if !ok || n.IsDeleted() {
return errors.New("expect updated node")
}
- //if len(n.Prev) == 0 {
+ // if len(n.Prev) == 0 {
// return errors.New("expect origin value")
- //}
- //if !bytes.Equal(n.Prev, blob) {
+ // }
+ // if !bytes.Equal(n.Prev, blob) {
// return errors.New("invalid origin value")
- //}
+ // }
}
return nil
}
@@ -698,7 +698,7 @@ func BenchmarkHash(b *testing.B) {
}
b.ResetTimer()
b.ReportAllocs()
- //trie.hashRoot(nil, nil)
+ // trie.hashRoot(nil, nil)
trie.Hash()
}
@@ -1500,3 +1500,57 @@ func testTrieCopyNewTrie(t *testing.T, entries []kv) {
t.Errorf("Hash mismatch: old %v, new %v", hash, tr.Hash())
}
}
+
+func TestUpdateBatch(t *testing.T) {
+ testUpdateBatch(t, []kv{
+ {k: []byte("do"), v: []byte("verb")},
+ {k: []byte("ether"), v: []byte("wookiedoo")},
+ {k: []byte("horse"), v: []byte("stallion")},
+ {k: []byte("shaman"), v: []byte("horse")},
+ {k: []byte("doge"), v: []byte("coin")},
+ {k: []byte("dog"), v: []byte("puppy")},
+ })
+
+ var entries []kv
+ for i := 0; i < 256; i++ {
+ entries = append(entries, kv{k: testrand.Bytes(32), v: testrand.Bytes(32)})
+ }
+ testUpdateBatch(t, entries)
+}
+
+func testUpdateBatch(t *testing.T, entries []kv) {
+ var (
+ base = NewEmpty(nil)
+ keys [][]byte
+ vals [][]byte
+ )
+ for _, entry := range entries {
+ base.Update(entry.k, entry.v)
+ keys = append(keys, entry.k)
+ vals = append(vals, entry.v)
+ }
+ for i := 0; i < 10; i++ {
+ k, v := testrand.Bytes(32), testrand.Bytes(32)
+ base.Update(k, v)
+ keys = append(keys, k)
+ vals = append(vals, v)
+ }
+
+ cmp := NewEmpty(nil)
+ if err := cmp.UpdateBatch(keys, vals); err != nil {
+ t.Fatalf("Failed to update batch, %v", err)
+ }
+
+ // Traverse the original tree, the changes made on the copy one shouldn't
+ // affect the old one
+ for _, key := range keys {
+ v1, _ := base.Get(key)
+ v2, _ := cmp.Get(key)
+ if !bytes.Equal(v1, v2) {
+ t.Errorf("Unexpected data, key: %v, want: %v, got: %v", key, v1, v2)
+ }
+ }
+ if base.Hash() != cmp.Hash() {
+ t.Errorf("Hash mismatch: want %x, got %x", base.Hash(), cmp.Hash())
+ }
+}
diff --git a/trie/verkle.go b/trie/verkle.go
index 015b8f6590..9427b13021 100644
--- a/trie/verkle.go
+++ b/trie/verkle.go
@@ -156,6 +156,22 @@ func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount,
return nil
}
+// UpdateAccountBatch attempts to update a list accounts in the batch manner.
+func (t *VerkleTrie) UpdateAccountBatch(addresses []common.Address, accounts []*types.StateAccount, codeLens []int) error {
+ if len(addresses) != len(accounts) {
+ return fmt.Errorf("address and accounts length mismatch: %d != %d", len(addresses), len(accounts))
+ }
+ if len(addresses) != len(codeLens) {
+ return fmt.Errorf("address and code length mismatch: %d != %d", len(addresses), len(codeLens))
+ }
+ for i, addr := range addresses {
+ if err := t.UpdateAccount(addr, accounts[i], codeLens[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// UpdateStorage implements state.Trie, writing the provided storage slot into
// the tree. If the tree is corrupted, an error will be returned.
func (t *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) error {
@@ -170,6 +186,19 @@ func (t *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) er
return t.root.Insert(k, v[:], t.nodeResolver)
}
+// UpdateStorageBatch attempts to update a list storages in the batch manner.
+func (t *VerkleTrie) UpdateStorageBatch(address common.Address, keys [][]byte, values [][]byte) error {
+ if len(keys) != len(values) {
+ return fmt.Errorf("keys and values length mismatch: %d != %d", len(keys), len(values))
+ }
+ for i, key := range keys {
+ if err := t.UpdateStorage(address, key, values[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// DeleteAccount leaves the account untouched, as no account deletion can happen
// in verkle.
// There is a special corner case, in which an account that is prefunded, CREATE2-d