diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2a55bf4fa0..548c99acfa 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,35 @@
# Changelog
+## v1.4.2
+### Feature
+* [\#2021](https://github.com/bnb-chain/bsc/pull/2021) feat: support separate trie database
+* [\#2224](https://github.com/bnb-chain/bsc/pull/2224) feat: support MEV
+
+### Bugfix
+* [\#2268](https://github.com/bnb-chain/bsc/pull/2268) fix: ensure EIP-4788 not supported with Parlia Engine
+
+### Cancun Code Merge
+#### 4844 related
+[internal/ethapi: add support for blobs in eth_fillTransaction (#28839)](https://github.com/bnb-chain/bsc/commit/ac5aa672d3b85a1f74667a65a15398f072aa0b2a)
+[internal/ethapi: fix defaults for blob fields (#29037)](https://github.com/bnb-chain/bsc/commit/b47cf8fe1de4f97ce38417d8136a58812734a7a9)
+[ethereum, ethclient: add blob transaction fields in CallMsg (#28989)](https://github.com/bnb-chain/bsc/commit/9d537f543990d9013d73433dc58fd0e985d9b2b6)
+[core/txpool/blobpool: post-crash cleanup and addition/removal metrics(#28914)](https://github.com/bnb-chain/bsc/commit/62affdc9c5ea6f1a73fde42ac5ee5c9795877f88)
+[core/txpool/blobpool: update the blob db with corruption handling (#29001)](https://github.com/bnb-chain/bsc/commit/3c30de219f92120248b7b7aeeb2bef82305e9627)
+[core/txpool, eth, miner: pre-filter dynamic fees during pending tx retrieval (#29005)](https://github.com/bnb-chain/bsc/commit/593e303485473d9b9194792e4556a451c44dcc6c)
+[core/txpool, miner: speed up blob pool pending retrievals (#29008)](https://github.com/bnb-chain/bsc/commit/6fb0d0992bd4eb91faf1e081b3c4aa46adb0ef7d)
+[core/txpool, eth, miner: retrieve plain and blob txs separately (#29026)](https://github.com/bnb-chain/bsc/commit/f4852b8ddc8bef962d34210a4f7774b95767e421)
+[core/txpool: reject blob txs with blob fee cap below the minimum (#29081)](https://github.com/bnb-chain/bsc/commit/32d4d6e6160432be1cb9780a43253deda7708ced)
+[core/txpool/blobpool: reduce default database cap for rollout (#29090)](https://github.com/bnb-chain/bsc/commit/63aaac81007ad46b208570c17cae78b7f60931d4)
+#### Clean Ups
+[cmd/devp2p, eth: drop support for eth/67 (#28956)](https://github.com/bnb-chain/bsc/commit/8a76a814a2b9e5b4c1a4c6de44cd702536104507)
+[all: remove the dependency from trie to triedb (#28824)](https://github.com/bnb-chain/bsc/commit/fe91d476ba3e29316b6dc99b6efd4a571481d888)
+#### Others
+[eth, miner: fix enforcing the minimum miner tip (#28933)](https://github.com/bnb-chain/bsc/commit/16ce7bf50fa71c907d1dc6504ed32a9161e71351)
+[cmd,internal/era: implement export-history subcommand(#26621)](https://github.com/bnb-chain/bsc/commit/1f50aa76318689c6e74d0c3b4f31421bf7382fc7)
+[node, rpc: add configurable HTTP request limit (#28948)](https://github.com/bnb-chain/bsc/commit/69f5d5ba1fe355ff7e3dee5a0c7e662cd82f1071)
+[tests: fix goroutine leak related to state snapshot generation (#28974)](https://github.com/bnb-chain/bsc/commit/8321fe2fda0b44d6df3750bcee28b8627525173b)
+[internal/ethapi:fix zero rpc gas cap in eth_createAccessList (#28846)](https://github.com/bnb-chain/bsc/commit/b87b9b45331f87fb1da379c5f17a81ebc3738c6e)
+[eth/tracers: Fix callTracer logs on onlyTopCall == true (#29068)](https://github.com/bnb-chain/bsc/commit/5a0f468f8cb15b939bd85445d33c614a36942a8e)
+
## v1.4.1
FEATURE
NA
diff --git a/Makefile b/Makefile
index 36d89e60e5..4b46068866 100644
--- a/Makefile
+++ b/Makefile
@@ -11,14 +11,17 @@ GORUN = go run
GIT_COMMIT=$(shell git rev-parse HEAD)
GIT_COMMIT_DATE=$(shell git log -n1 --pretty='format:%cd' --date=format:'%Y%m%d')
+#? geth: Build geth
geth:
$(GORUN) build/ci.go install ./cmd/geth
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."
+#? all: Build all packages and executables
all:
$(GORUN) build/ci.go install
+#? test: Run the tests
test: all
$(GORUN) build/ci.go test -timeout 1h
@@ -32,9 +35,11 @@ truffle-test:
docker-compose -f ./tests/truffle/docker-compose.yml up --exit-code-from truffle-test truffle-test
docker-compose -f ./tests/truffle/docker-compose.yml down
+#? lint: Run certain pre-selected linters
lint: ## Run linters.
$(GORUN) build/ci.go lint
+#? clean: Clean go cache, built executables, and the auto generated folder
clean:
go clean -cache
rm -fr build/_workspace/pkg/ $(GOBIN)/*
@@ -42,6 +47,7 @@ clean:
# The devtools target installs tools required for 'go generate'.
# You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.
+#? devtools: Install recommended developer tools
devtools:
env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
env GOBIN= go install github.com/fjl/gencodec@latest
@@ -50,5 +56,12 @@ devtools:
@type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc'
+#? docker: Build docker image
docker:
docker build --pull -t bnb-chain/bsc:latest -f Dockerfile .
+
+#? help: Get more info on make commands.
+help: Makefile
+ @echo " Choose a command run in go-ethereum:"
+ @sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /'
+.PHONY: help
diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go
index 4abf298068..c7bc2b4541 100644
--- a/accounts/abi/abi.go
+++ b/accounts/abi/abi.go
@@ -29,7 +29,7 @@ import (
)
// The ABI holds information about a contract's context and available
-// invokable methods. It will allow you to type check function calls and
+// invocable methods. It will allow you to type check function calls and
// packs data accordingly.
type ABI struct {
Constructor Method
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 756a9d3552..dfd9296952 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -20,7 +20,7 @@ import (
"context"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient/simulated"
)
@@ -43,7 +43,7 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parentHash common.Hash) err
//
// Deprecated: please use simulated.Backend from package
// github.com/ethereum/go-ethereum/ethclient/simulated instead.
-func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
+func NewSimulatedBackend(alloc types.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
b := simulated.NewBackend(alloc, simulated.WithBlockGasLimit(gasLimit))
return &SimulatedBackend{
Backend: b,
diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index 11bfd0378d..4a3a7c4bdf 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -289,7 +289,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -297,7 +297,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy an interaction tester contract and call a transaction on it
@@ -345,7 +345,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -353,7 +353,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy a tuple tester contract and execute a structured call on it
@@ -391,7 +391,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -399,7 +399,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy a tuple tester contract and execute a structured call on it
@@ -449,7 +449,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -457,7 +457,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy a slice tester contract and execute a n array call on it
@@ -497,7 +497,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -505,7 +505,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy a default method invoker contract and execute its default method
@@ -564,7 +564,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -572,7 +572,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy a structs method invoker contract and execute its default method
@@ -610,12 +610,12 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
`,
`
// Create a simulator and wrap a non-deployed contract
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000))
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{}, uint64(10000000000))
defer sim.Close()
nonexistent, err := NewNonExistent(common.Address{}, sim)
@@ -649,12 +649,12 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
`,
`
// Create a simulator and wrap a non-deployed contract
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000))
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{}, uint64(10000000000))
defer sim.Close()
nonexistent, err := NewNonExistentStruct(common.Address{}, sim)
@@ -696,7 +696,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -704,7 +704,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy a funky gas pattern contract
@@ -746,7 +746,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -754,7 +754,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy a sender tester contract and execute a structured call on it
@@ -821,7 +821,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -829,7 +829,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy a underscorer tester contract and execute a structured call on it
@@ -915,7 +915,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -923,7 +923,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy an eventer contract
@@ -1105,7 +1105,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -1113,7 +1113,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
//deploy the test contract
@@ -1240,7 +1240,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
@@ -1248,7 +1248,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
_, _, contract, err := DeployTuple(auth, sim)
@@ -1382,7 +1382,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -1390,7 +1390,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
//deploy the test contract
@@ -1448,14 +1448,14 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
// Initialize test accounts
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// deploy the test contract
@@ -1537,7 +1537,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
`,
`
// Initialize test accounts
@@ -1545,7 +1545,7 @@ var bindTests = []struct {
addr := crypto.PubkeyToAddress(key.PublicKey)
// Deploy registrar contract
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@@ -1600,14 +1600,14 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
`,
`
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
// Deploy registrar contract
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@@ -1661,7 +1661,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
@@ -1669,7 +1669,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close()
// Deploy a tester contract and execute a structured call on it
@@ -1722,14 +1722,14 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
`,
`
key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey)
- sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000)
+ sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000)
defer sim.Close()
opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@@ -1810,7 +1810,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
`,
@@ -1818,7 +1818,7 @@ var bindTests = []struct {
var (
key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
+ sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
)
defer sim.Close()
@@ -1881,7 +1881,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
`,
@@ -1889,7 +1889,7 @@ var bindTests = []struct {
var (
key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
+ sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
)
defer sim.Close()
@@ -1934,7 +1934,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
`,
@@ -1942,7 +1942,7 @@ var bindTests = []struct {
var (
key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
+ sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
)
defer sim.Close()
@@ -1983,7 +1983,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
`,
@@ -1991,7 +1991,7 @@ var bindTests = []struct {
var (
key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
+ sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
)
defer sim.Close()
@@ -2024,7 +2024,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
`,
@@ -2032,7 +2032,7 @@ var bindTests = []struct {
var (
key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
- sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
+ sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
)
_, tx, _, err := DeployRangeKeyword(user, sim)
if err != nil {
diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go
index 9fd919a295..592465f2ac 100644
--- a/accounts/abi/bind/util_test.go
+++ b/accounts/abi/bind/util_test.go
@@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient/simulated"
@@ -57,7 +56,7 @@ func TestWaitDeployed(t *testing.T) {
t.Parallel()
for name, test := range waitDeployedTests {
backend := simulated.NewBackend(
- core.GenesisAlloc{
+ types.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
},
)
@@ -65,7 +64,7 @@ func TestWaitDeployed(t *testing.T) {
// Create the transaction
head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough
- gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
+ gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))
tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code))
tx, _ = types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey)
@@ -102,7 +101,7 @@ func TestWaitDeployed(t *testing.T) {
func TestWaitDeployedCornerCases(t *testing.T) {
backend := simulated.NewBackend(
- core.GenesisAlloc{
+ types.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
},
)
diff --git a/accounts/scwallet/hub.go b/accounts/scwallet/hub.go
index f9dcf58e19..5f1f369ca2 100644
--- a/accounts/scwallet/hub.go
+++ b/accounts/scwallet/hub.go
@@ -241,7 +241,7 @@ func (hub *Hub) refreshWallets() {
card.Disconnect(pcsc.LeaveCard)
continue
}
- // Card connected, start tracking in amongs the wallets
+ // Card connected, start tracking among the wallets
hub.wallets[reader] = wallet
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
}
diff --git a/accounts/usbwallet/trezor/trezor.go b/accounts/usbwallet/trezor/trezor.go
index 7e756e609b..cdca6b4e0b 100644
--- a/accounts/usbwallet/trezor/trezor.go
+++ b/accounts/usbwallet/trezor/trezor.go
@@ -16,7 +16,7 @@
// This file contains the implementation for interacting with the Trezor hardware
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
-// https://wiki.trezor.io/Developers_guide-Message_Workflows
+// https://docs.trezor.io/trezor-firmware/common/message-workflows.html
// !!! STAHP !!!
//
diff --git a/beacon/engine/types.go b/beacon/engine/types.go
index f72319ad50..60accc3c79 100644
--- a/beacon/engine/types.go
+++ b/beacon/engine/types.go
@@ -303,3 +303,21 @@ type ExecutionPayloadBodyV1 struct {
TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
}
+
+// Client identifiers to support ClientVersionV1.
+const (
+ ClientCode = "GE"
+ ClientName = "go-ethereum"
+)
+
+// ClientVersionV1 contains information which identifies a client implementation.
+type ClientVersionV1 struct {
+ Code string `json:"code"`
+ Name string `json:"clientName"`
+ Version string `json:"version"`
+ Commit string `json:"commit"`
+}
+
+func (v *ClientVersionV1) String() string {
+ return fmt.Sprintf("%s-%s-%s-%s", v.Code, v.Name, v.Version, v.Commit)
+}
diff --git a/build/checksums.txt b/build/checksums.txt
index 96815ff791..03a53946df 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -1,9 +1,9 @@
# This file contains sha256 checksums of optional build dependencies.
-# version:spec-tests 1.0.6
+# version:spec-tests 2.1.0
# https://github.com/ethereum/execution-spec-tests/releases
-# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.6/
-485af7b66cf41eb3a8c1bd46632913b8eb95995df867cf665617bbc9b4beedd1 fixtures_develop.tar.gz
+# https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
+ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz
# version:golang 1.21.6
# https://go.dev/dl/
diff --git a/build/ci.go b/build/ci.go
index 9c2680384e..31ffa4a7b6 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -121,14 +121,13 @@ var (
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish,
- // kinetic
+ // kinetic, lunar
debDistroGoBoots = map[string]string{
"trusty": "golang-1.11", // 14.04, EOL: 04/2024
"xenial": "golang-go", // 16.04, EOL: 04/2026
"bionic": "golang-go", // 18.04, EOL: 04/2028
"focal": "golang-go", // 20.04, EOL: 04/2030
"jammy": "golang-go", // 22.04, EOL: 04/2032
- "lunar": "golang-go", // 23.04, EOL: 01/2024
"mantic": "golang-go", // 23.10, EOL: 07/2024
}
diff --git a/cmd/clef/README.md b/cmd/clef/README.md
index 3a43db8c95..cf09265136 100644
--- a/cmd/clef/README.md
+++ b/cmd/clef/README.md
@@ -916,7 +916,7 @@ There are a couple of implementation for a UI. We'll try to keep this list up to
| Name | Repo | UI type| No external resources| Blocky support| Verifies permissions | Hash information | No secondary storage | Statically linked| Can modify parameters|
| ---- | ---- | -------| ---- | ---- | ---- |---- | ---- | ---- | ---- |
-| QtSigner| https://github.com/holiman/qtsigner/| Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)|
-| GtkSigner| https://github.com/holiman/gtksigner| Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: |
-| Frame | https://github.com/floating/frame/commits/go-signer| Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: |
-| Clef UI| https://github.com/ethereum/clef-ui| Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)|
+| QtSigner| https://github.com/holiman/qtsigner/ | Python3/QT-based| :+1:| :+1:| :+1:| :+1:| :+1:| :x: | :+1: (partially)|
+| GtkSigner| https://github.com/holiman/gtksigner | Python3/GTK-based| :+1:| :x:| :x:| :+1:| :+1:| :x: | :x: |
+| Frame | https://github.com/floating/frame/commits/go-signer | Electron-based| :x:| :x:| :x:| :x:| ?| :x: | :x: |
+| Clef UI| https://github.com/ethereum/clef-ui | Golang/QT-based| :+1:| :+1:| :x:| :+1:| :+1:| :x: | :+1: (approve tx only)|
diff --git a/cmd/clef/datatypes.md b/cmd/clef/datatypes.md
index dd8cda5846..8456edfa35 100644
--- a/cmd/clef/datatypes.md
+++ b/cmd/clef/datatypes.md
@@ -75,7 +75,7 @@ Example:
},
{
"type": "Info",
- "message": "User should see this aswell"
+ "message": "User should see this as well"
}
],
"meta": {
diff --git a/cmd/devp2p/internal/ethtest/conn.go b/cmd/devp2p/internal/ethtest/conn.go
index e13f47082a..c4f1bb1ff4 100644
--- a/cmd/devp2p/internal/ethtest/conn.go
+++ b/cmd/devp2p/internal/ethtest/conn.go
@@ -166,7 +166,7 @@ func (c *Conn) ReadEth() (any, error) {
case eth.TransactionsMsg:
msg = new(eth.TransactionsPacket)
case eth.NewPooledTransactionHashesMsg:
- msg = new(eth.NewPooledTransactionHashesPacket68)
+ msg = new(eth.NewPooledTransactionHashesPacket)
case eth.GetPooledTransactionsMsg:
msg = new(eth.GetPooledTransactionsPacket)
case eth.PooledTransactionsMsg:
diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go
index 4f499d41d8..d9efe26244 100644
--- a/cmd/devp2p/internal/ethtest/suite.go
+++ b/cmd/devp2p/internal/ethtest/suite.go
@@ -64,23 +64,23 @@ func NewSuite(dest *enode.Node, chainDir, engineURL, jwt string) (*Suite, error)
func (s *Suite) EthTests() []utesting.Test {
return []utesting.Test{
// status
- {Name: "TestStatus", Fn: s.TestStatus},
+ {Name: "Status", Fn: s.TestStatus},
// get block headers
- {Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders},
- {Name: "TestSimultaneousRequests", Fn: s.TestSimultaneousRequests},
- {Name: "TestSameRequestID", Fn: s.TestSameRequestID},
- {Name: "TestZeroRequestID", Fn: s.TestZeroRequestID},
+ {Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders},
+ {Name: "SimultaneousRequests", Fn: s.TestSimultaneousRequests},
+ {Name: "SameRequestID", Fn: s.TestSameRequestID},
+ {Name: "ZeroRequestID", Fn: s.TestZeroRequestID},
// get block bodies
- {Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies},
+ {Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
// // malicious handshakes + status
- {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake},
- {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus},
+ {Name: "MaliciousHandshake", Fn: s.TestMaliciousHandshake},
+ {Name: "MaliciousStatus", Fn: s.TestMaliciousStatus},
// test transactions
- {Name: "TestLargeTxRequest", Fn: s.TestLargeTxRequest, Slow: true},
- {Name: "TestTransaction", Fn: s.TestTransaction},
- {Name: "TestInvalidTxs", Fn: s.TestInvalidTxs},
- {Name: "TestNewPooledTxs", Fn: s.TestNewPooledTxs},
- {Name: "TestBlobViolations", Fn: s.TestBlobViolations},
+ {Name: "LargeTxRequest", Fn: s.TestLargeTxRequest, Slow: true},
+ {Name: "Transaction", Fn: s.TestTransaction},
+ {Name: "InvalidTxs", Fn: s.TestInvalidTxs},
+ {Name: "NewPooledTxs", Fn: s.TestNewPooledTxs},
+ {Name: "BlobViolations", Fn: s.TestBlobViolations},
}
}
@@ -94,9 +94,9 @@ func (s *Suite) SnapTests() []utesting.Test {
}
}
-// TestStatus attempts to connect to the given node and exchange a status
-// message with it on the eth protocol.
func (s *Suite) TestStatus(t *utesting.T) {
+ t.Log(`This test is just a sanity check. It performs an eth protocol handshake.`)
+
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -112,9 +112,9 @@ func headersMatch(expected []*types.Header, headers []*types.Header) bool {
return reflect.DeepEqual(expected, headers)
}
-// TestGetBlockHeaders tests whether the given node can respond to an eth
-// `GetBlockHeaders` request and that the response is accurate.
func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
+ t.Log(`This test requests block headers from the node.`)
+
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -154,10 +154,10 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
}
}
-// TestSimultaneousRequests sends two simultaneous `GetBlockHeader` requests
-// from the same connection with different request IDs and checks to make sure
-// the node responds with the correct headers per request.
func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
+ t.Log(`This test requests blocks headers from the node, performing two requests
+concurrently, with different request IDs.`)
+
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -228,9 +228,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
}
}
-// TestSameRequestID sends two requests with the same request ID to a single
-// node.
func (s *Suite) TestSameRequestID(t *utesting.T) {
+ t.Log(`This test requests block headers, performing two concurrent requests with the
+same request ID. The node should handle the request by responding to both requests.`)
+
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -298,9 +299,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
}
}
-// TestZeroRequestID checks that a message with a request ID of zero is still handled
-// by the node.
func (s *Suite) TestZeroRequestID(t *utesting.T) {
+ t.Log(`This test sends a GetBlockHeaders message with a request-id of zero,
+and expects a response.`)
+
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -333,9 +335,9 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) {
}
}
-// TestGetBlockBodies tests whether the given node can respond to a
-// `GetBlockBodies` request and that the response is accurate.
func (s *Suite) TestGetBlockBodies(t *utesting.T) {
+ t.Log(`This test sends GetBlockBodies requests to the node for known blocks in the test chain.`)
+
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -376,12 +378,12 @@ func randBuf(size int) []byte {
return buf
}
-// TestMaliciousHandshake tries to send malicious data during the handshake.
func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
- key, _ := crypto.GenerateKey()
+ t.Log(`This test tries to send malicious data during the devp2p handshake, in various ways.`)
// Write hello to client.
var (
+ key, _ = crypto.GenerateKey()
pub0 = crypto.FromECDSAPub(&key.PublicKey)[1:]
version = eth.ProtocolVersions[0]
)
@@ -451,8 +453,9 @@ func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
}
}
-// TestMaliciousStatus sends a status package with a large total difficulty.
func (s *Suite) TestMaliciousStatus(t *utesting.T) {
+ t.Log(`This test sends a malicious eth Status message to the node and expects a disconnect.`)
+
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
@@ -486,9 +489,10 @@ func (s *Suite) TestMaliciousStatus(t *utesting.T) {
}
}
-// TestTransaction sends a valid transaction to the node and checks if the
-// transaction gets propagated.
func (s *Suite) TestTransaction(t *utesting.T) {
+ t.Log(`This test sends a valid transaction to the node and checks if the
+transaction gets propagated.`)
+
// Nudge client out of syncing mode to accept pending txs.
if err := s.engine.sendForkchoiceUpdated(); err != nil {
t.Fatalf("failed to send next block: %v", err)
@@ -507,15 +511,16 @@ func (s *Suite) TestTransaction(t *utesting.T) {
if err != nil {
t.Fatalf("failed to sign tx: %v", err)
}
- if err := s.sendTxs([]*types.Transaction{tx}); err != nil {
+ if err := s.sendTxs(t, []*types.Transaction{tx}); err != nil {
t.Fatal(err)
}
s.chain.IncNonce(from, 1)
}
-// TestInvalidTxs sends several invalid transactions and tests whether
-// the node will propagate them.
func (s *Suite) TestInvalidTxs(t *utesting.T) {
+ t.Log(`This test sends several kinds of invalid transactions and checks that the node
+does not propagate them.`)
+
// Nudge client out of syncing mode to accept pending txs.
if err := s.engine.sendForkchoiceUpdated(); err != nil {
t.Fatalf("failed to send next block: %v", err)
@@ -534,7 +539,7 @@ func (s *Suite) TestInvalidTxs(t *utesting.T) {
if err != nil {
t.Fatalf("failed to sign tx: %v", err)
}
- if err := s.sendTxs([]*types.Transaction{tx}); err != nil {
+ if err := s.sendTxs(t, []*types.Transaction{tx}); err != nil {
t.Fatalf("failed to send txs: %v", err)
}
s.chain.IncNonce(from, 1)
@@ -590,14 +595,15 @@ func (s *Suite) TestInvalidTxs(t *utesting.T) {
}
txs = append(txs, tx)
}
- if err := s.sendInvalidTxs(txs); err != nil {
+ if err := s.sendInvalidTxs(t, txs); err != nil {
t.Fatalf("failed to send invalid txs: %v", err)
}
}
-// TestLargeTxRequest tests whether a node can fulfill a large GetPooledTransactions
-// request.
func (s *Suite) TestLargeTxRequest(t *utesting.T) {
+ t.Log(`This test first send ~2000 transactions to the node, then requests them
+on another peer connection using GetPooledTransactions.`)
+
// Nudge client out of syncing mode to accept pending txs.
if err := s.engine.sendForkchoiceUpdated(); err != nil {
t.Fatalf("failed to send next block: %v", err)
@@ -630,7 +636,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
s.chain.IncNonce(from, uint64(count))
// Send txs.
- if err := s.sendTxs(txs); err != nil {
+ if err := s.sendTxs(t, txs); err != nil {
t.Fatalf("failed to send txs: %v", err)
}
@@ -667,13 +673,15 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
}
}
-// TestNewPooledTxs tests whether a node will do a GetPooledTransactions request
-// upon receiving a NewPooledTransactionHashes announcement.
func (s *Suite) TestNewPooledTxs(t *utesting.T) {
+ t.Log(`This test announces transaction hashes to the node and expects it to fetch
+the transactions using a GetPooledTransactions request.`)
+
// Nudge client out of syncing mode to accept pending txs.
if err := s.engine.sendForkchoiceUpdated(); err != nil {
t.Fatalf("failed to send next block: %v", err)
}
+
var (
count = 50
from, nonce = s.chain.GetSender(1)
@@ -710,7 +718,7 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
}
// Send announcement.
- ann := eth.NewPooledTransactionHashesPacket68{Types: txTypes, Sizes: sizes, Hashes: hashes}
+ ann := eth.NewPooledTransactionHashesPacket{Types: txTypes, Sizes: sizes, Hashes: hashes}
err = conn.Write(ethProto, eth.NewPooledTransactionHashesMsg, ann)
if err != nil {
t.Fatalf("failed to write to connection: %v", err)
@@ -728,7 +736,7 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
}
return
- case *eth.NewPooledTransactionHashesPacket68:
+ case *eth.NewPooledTransactionHashesPacket:
continue
case *eth.TransactionsPacket:
continue
@@ -762,7 +770,7 @@ func (s *Suite) makeBlobTxs(count, blobs int, discriminator byte) (txs types.Tra
from, nonce := s.chain.GetSender(5)
for i := 0; i < count; i++ {
// Make blob data, max of 2 blobs per tx.
- blobdata := make([]byte, blobs%2)
+ blobdata := make([]byte, blobs%3)
for i := range blobdata {
blobdata[i] = discriminator
blobs -= 1
@@ -787,6 +795,8 @@ func (s *Suite) makeBlobTxs(count, blobs int, discriminator byte) (txs types.Tra
}
func (s *Suite) TestBlobViolations(t *utesting.T) {
+ t.Log(`This test sends some invalid blob tx announcements and expects the node to disconnect.`)
+
if err := s.engine.sendForkchoiceUpdated(); err != nil {
t.Fatalf("send fcu failed: %v", err)
}
@@ -796,12 +806,12 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
t2 = s.makeBlobTxs(2, 3, 0x2)
)
for _, test := range []struct {
- ann eth.NewPooledTransactionHashesPacket68
+ ann eth.NewPooledTransactionHashesPacket
resp eth.PooledTransactionsResponse
}{
// Invalid tx size.
{
- ann: eth.NewPooledTransactionHashesPacket68{
+ ann: eth.NewPooledTransactionHashesPacket{
Types: []byte{types.BlobTxType, types.BlobTxType},
Sizes: []uint32{uint32(t1[0].Size()), uint32(t1[1].Size() + 10)},
Hashes: []common.Hash{t1[0].Hash(), t1[1].Hash()},
@@ -810,7 +820,7 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
},
// Wrong tx type.
{
- ann: eth.NewPooledTransactionHashesPacket68{
+ ann: eth.NewPooledTransactionHashesPacket{
Types: []byte{types.DynamicFeeTxType, types.BlobTxType},
Sizes: []uint32{uint32(t2[0].Size()), uint32(t2[1].Size())},
Hashes: []common.Hash{t2[0].Hash(), t2[1].Hash()},
diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go
index 0ea7c32752..80b5d80745 100644
--- a/cmd/devp2p/internal/ethtest/transaction.go
+++ b/cmd/devp2p/internal/ethtest/transaction.go
@@ -25,11 +25,12 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
+ "github.com/ethereum/go-ethereum/internal/utesting"
)
// sendTxs sends the given transactions to the node and
// expects the node to accept and propagate them.
-func (s *Suite) sendTxs(txs []*types.Transaction) error {
+func (s *Suite) sendTxs(t *utesting.T, txs []*types.Transaction) error {
// Open sending conn.
sendConn, err := s.dial()
if err != nil {
@@ -70,10 +71,19 @@ func (s *Suite) sendTxs(txs []*types.Transaction) error {
for _, tx := range *msg {
got[tx.Hash()] = true
}
- case *eth.NewPooledTransactionHashesPacket68:
+ case *eth.NewPooledTransactionHashesPacket:
for _, hash := range msg.Hashes {
got[hash] = true
}
+ case *eth.GetBlockHeadersPacket:
+ headers, err := s.chain.GetHeaders(msg)
+ if err != nil {
+ t.Logf("invalid GetBlockHeaders request: %v", err)
+ }
+ recvConn.Write(ethProto, eth.BlockHeadersMsg, &eth.BlockHeadersPacket{
+ RequestId: msg.RequestId,
+ BlockHeadersRequest: headers,
+ })
default:
return fmt.Errorf("unexpected eth wire msg: %s", pretty.Sdump(msg))
}
@@ -95,7 +105,7 @@ func (s *Suite) sendTxs(txs []*types.Transaction) error {
return fmt.Errorf("timed out waiting for txs")
}
-func (s *Suite) sendInvalidTxs(txs []*types.Transaction) error {
+func (s *Suite) sendInvalidTxs(t *utesting.T, txs []*types.Transaction) error {
// Open sending conn.
sendConn, err := s.dial()
if err != nil {
@@ -146,12 +156,21 @@ func (s *Suite) sendInvalidTxs(txs []*types.Transaction) error {
return fmt.Errorf("received bad tx: %s", tx.Hash())
}
}
- case *eth.NewPooledTransactionHashesPacket68:
+ case *eth.NewPooledTransactionHashesPacket:
for _, hash := range msg.Hashes {
if _, ok := invalids[hash]; ok {
return fmt.Errorf("received bad tx: %s", hash)
}
}
+ case *eth.GetBlockHeadersPacket:
+ headers, err := s.chain.GetHeaders(msg)
+ if err != nil {
+ t.Logf("invalid GetBlockHeaders request: %v", err)
+ }
+ recvConn.Write(ethProto, eth.BlockHeadersMsg, &eth.BlockHeadersPacket{
+ RequestId: msg.RequestId,
+ BlockHeadersRequest: headers,
+ })
default:
return fmt.Errorf("unexpected eth message: %v", pretty.Sdump(msg))
}
diff --git a/cmd/era/main.go b/cmd/era/main.go
new file mode 100644
index 0000000000..e27d8ccec6
--- /dev/null
+++ b/cmd/era/main.go
@@ -0,0 +1,324 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/internal/era"
+ "github.com/ethereum/go-ethereum/internal/ethapi"
+ "github.com/ethereum/go-ethereum/internal/flags"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/urfave/cli/v2"
+)
+
+var app = flags.NewApp("go-ethereum era tool")
+
+var (
+ dirFlag = &cli.StringFlag{
+ Name: "dir",
+ Usage: "directory storing all relevant era1 files",
+ Value: "eras",
+ }
+ networkFlag = &cli.StringFlag{
+ Name: "network",
+ Usage: "network name associated with era1 files",
+ Value: "mainnet",
+ }
+ eraSizeFlag = &cli.IntFlag{
+ Name: "size",
+ Usage: "number of blocks per era",
+ Value: era.MaxEra1Size,
+ }
+ txsFlag = &cli.BoolFlag{
+ Name: "txs",
+ Usage: "print full transaction values",
+ }
+)
+
+var (
+ blockCommand = &cli.Command{
+ Name: "block",
+ Usage: "get block data",
+ ArgsUsage: "<number>",
+ Action: block,
+ Flags: []cli.Flag{
+ txsFlag,
+ },
+ }
+ infoCommand = &cli.Command{
+ Name: "info",
+ ArgsUsage: "<epoch>",
+ Usage: "get epoch information",
+ Action: info,
+ }
+ verifyCommand = &cli.Command{
+ Name: "verify",
+ ArgsUsage: "<expected>",
+ Usage: "verifies each era1 against expected accumulator root",
+ Action: verify,
+ }
+)
+
+func init() {
+ app.Commands = []*cli.Command{
+ blockCommand,
+ infoCommand,
+ verifyCommand,
+ }
+ app.Flags = []cli.Flag{
+ dirFlag,
+ networkFlag,
+ eraSizeFlag,
+ }
+}
+
+func main() {
+ if err := app.Run(os.Args); err != nil {
+ fmt.Fprintf(os.Stderr, "%v\n", err)
+ os.Exit(1)
+ }
+}
+
+// block prints the specified block from an era1 store.
+func block(ctx *cli.Context) error {
+ num, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid block number: %w", err)
+ }
+ e, err := open(ctx, num/uint64(ctx.Int(eraSizeFlag.Name)))
+ if err != nil {
+ return fmt.Errorf("error opening era1: %w", err)
+ }
+ defer e.Close()
+ // Read block with number.
+ block, err := e.GetBlockByNumber(num)
+ if err != nil {
+ return fmt.Errorf("error reading block %d: %w", num, err)
+ }
+ // Convert block to JSON and print.
+ val := ethapi.RPCMarshalBlock(block, ctx.Bool(txsFlag.Name), ctx.Bool(txsFlag.Name), params.MainnetChainConfig)
+ b, err := json.MarshalIndent(val, "", " ")
+ if err != nil {
+ return fmt.Errorf("error marshaling json: %w", err)
+ }
+ fmt.Println(string(b))
+ return nil
+}
+
+// info prints some high-level information about the era1 file.
+func info(ctx *cli.Context) error {
+ epoch, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid epoch number: %w", err)
+ }
+ e, err := open(ctx, epoch)
+ if err != nil {
+ return err
+ }
+ defer e.Close()
+ acc, err := e.Accumulator()
+ if err != nil {
+ return fmt.Errorf("error reading accumulator: %w", err)
+ }
+ td, err := e.InitialTD()
+ if err != nil {
+ return fmt.Errorf("error reading total difficulty: %w", err)
+ }
+ info := struct {
+ Accumulator common.Hash `json:"accumulator"`
+ TotalDifficulty *big.Int `json:"totalDifficulty"`
+ StartBlock uint64 `json:"startBlock"`
+ Count uint64 `json:"count"`
+ }{
+ acc, td, e.Start(), e.Count(),
+ }
+ b, _ := json.MarshalIndent(info, "", " ")
+ fmt.Println(string(b))
+ return nil
+}
+
+// open opens an era1 file at a certain epoch.
+func open(ctx *cli.Context, epoch uint64) (*era.Era, error) {
+ var (
+ dir = ctx.String(dirFlag.Name)
+ network = ctx.String(networkFlag.Name)
+ )
+ entries, err := era.ReadDir(dir, network)
+ if err != nil {
+ return nil, fmt.Errorf("error reading era dir: %w", err)
+ }
+ if epoch >= uint64(len(entries)) {
+ return nil, fmt.Errorf("epoch out-of-bounds: last %d, want %d", len(entries)-1, epoch)
+ }
+ return era.Open(path.Join(dir, entries[epoch]))
+}
+
+// verify checks each era1 file in a directory to ensure it is well-formed and
+// that the accumulator matches the expected value.
+func verify(ctx *cli.Context) error {
+ if ctx.Args().Len() != 1 {
+ return fmt.Errorf("missing accumulators file")
+ }
+
+ roots, err := readHashes(ctx.Args().First())
+ if err != nil {
+ return fmt.Errorf("unable to read expected roots file: %w", err)
+ }
+
+ var (
+ dir = ctx.String(dirFlag.Name)
+ network = ctx.String(networkFlag.Name)
+ start = time.Now()
+ reported = time.Now()
+ )
+
+ entries, err := era.ReadDir(dir, network)
+ if err != nil {
+ return fmt.Errorf("error reading %s: %w", dir, err)
+ }
+
+ if len(entries) != len(roots) {
+ return fmt.Errorf("number of era1 files should match the number of accumulator hashes")
+ }
+
+ // Verify each epoch matches the expected root.
+ for i, want := range roots {
+ // Wrap in function so defers don't stack.
+ err := func() error {
+ name := entries[i]
+ e, err := era.Open(path.Join(dir, name))
+ if err != nil {
+ return fmt.Errorf("error opening era1 file %s: %w", name, err)
+ }
+ defer e.Close()
+ // Read accumulator and check against expected.
+ if got, err := e.Accumulator(); err != nil {
+ return fmt.Errorf("error retrieving accumulator for %s: %w", name, err)
+ } else if got != want {
+ return fmt.Errorf("invalid root %s: got %s, want %s", name, got, want)
+ }
+ // Recompute accumulator.
+ if err := checkAccumulator(e); err != nil {
+ return fmt.Errorf("error verify era1 file %s: %w", name, err)
+ }
+ // Give the user some feedback that something is happening.
+ if time.Since(reported) >= 8*time.Second {
+ fmt.Printf("Verifying Era1 files \t\t verified=%d,\t elapsed=%s\n", i, common.PrettyDuration(time.Since(start)))
+ reported = time.Now()
+ }
+ return nil
+ }()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// checkAccumulator verifies the accumulator matches the data in the Era.
+func checkAccumulator(e *era.Era) error {
+ var (
+ err error
+ want common.Hash
+ td *big.Int
+ tds = make([]*big.Int, 0)
+ hashes = make([]common.Hash, 0)
+ )
+ if want, err = e.Accumulator(); err != nil {
+ return fmt.Errorf("error reading accumulator: %w", err)
+ }
+ if td, err = e.InitialTD(); err != nil {
+ return fmt.Errorf("error reading total difficulty: %w", err)
+ }
+ it, err := era.NewIterator(e)
+ if err != nil {
+ return fmt.Errorf("error making era iterator: %w", err)
+ }
+ // To fully verify an era the following attributes must be checked:
+ // 1) the block index is constructed correctly
+ // 2) the tx root matches the value in the block
+ // 3) the receipts root matches the value in the block
+ // 4) the starting total difficulty value is correct
+ // 5) the accumulator is correct by recomputing it locally, which verifies
+ // the blocks are all correct (via hash)
+ //
+ // The attributes 1), 2), and 3) are checked for each block. 4) and 5) require
+ // accumulation across the entire set and are verified at the end.
+ for it.Next() {
+ // 1) next() walks the block index, so we're able to implicitly verify it.
+ if it.Error() != nil {
+ return fmt.Errorf("error reading block %d: %w", it.Number(), err)
+ }
+ block, receipts, err := it.BlockAndReceipts()
+ if it.Error() != nil {
+ return fmt.Errorf("error reading block %d: %w", it.Number(), err)
+ }
+ // 2) recompute tx root and verify against header.
+ tr := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil))
+ if tr != block.TxHash() {
+ return fmt.Errorf("tx root in block %d mismatch: want %s, got %s", block.NumberU64(), block.TxHash(), tr)
+ }
+ // 3) recompute receipt root and check value against block.
+ rr := types.DeriveSha(receipts, trie.NewStackTrie(nil))
+ if rr != block.ReceiptHash() {
+ return fmt.Errorf("receipt root in block %d mismatch: want %s, got %s", block.NumberU64(), block.ReceiptHash(), rr)
+ }
+ hashes = append(hashes, block.Hash())
+ td.Add(td, block.Difficulty())
+ tds = append(tds, new(big.Int).Set(td))
+ }
+ // 4+5) Verify accumulator and total difficulty.
+ got, err := era.ComputeAccumulator(hashes, tds)
+ if err != nil {
+ return fmt.Errorf("error computing accumulator: %w", err)
+ }
+ if got != want {
+ return fmt.Errorf("expected accumulator root does not match calculated: got %s, want %s", got, want)
+ }
+ return nil
+}
+
+// readHashes reads a file of newline-delimited hashes.
+func readHashes(f string) ([]common.Hash, error) {
+ b, err := os.ReadFile(f)
+ if err != nil {
+ return nil, fmt.Errorf("unable to open accumulators file")
+ }
+ s := strings.Split(string(b), "\n")
+ // Remove empty last element, if present.
+ if s[len(s)-1] == "" {
+ s = s[:len(s)-1]
+ }
+ // Convert to hashes.
+ r := make([]common.Hash, len(s))
+ for i := range s {
+ r[i] = common.HexToHash(s[i])
+ }
+ return r, nil
+}
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index ae0cb5e4ad..ca1590833e 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -36,13 +36,14 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)
type Prestate struct {
- Env stEnv `json:"env"`
- Pre core.GenesisAlloc `json:"pre"`
+ Env stEnv `json:"env"`
+ Pre types.GenesisAlloc `json:"pre"`
}
// ExecutionResult contains the execution status after running a state test, any
@@ -356,8 +357,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
return statedb, execRs, body, nil
}
-func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
- sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB {
+ sdb := state.NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
for addr, a := range accounts {
statedb.SetCode(addr, a.Code)
diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go
index 45b945c8a7..3f3c7fc1f9 100644
--- a/cmd/evm/internal/t8ntool/transition.go
+++ b/cmd/evm/internal/t8ntool/transition.go
@@ -27,7 +27,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
- "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -74,10 +73,10 @@ var (
)
type input struct {
- Alloc core.GenesisAlloc `json:"alloc,omitempty"`
- Env *stEnv `json:"env,omitempty"`
- Txs []*txWithKey `json:"txs,omitempty"`
- TxRlp string `json:"txsRlp,omitempty"`
+ Alloc types.GenesisAlloc `json:"alloc,omitempty"`
+ Env *stEnv `json:"env,omitempty"`
+ Txs []*txWithKey `json:"txs,omitempty"`
+ TxRlp string `json:"txsRlp,omitempty"`
}
func Transition(ctx *cli.Context) error {
@@ -272,7 +271,7 @@ func applyCancunChecks(env *stEnv, chainConfig *params.ChainConfig) error {
return nil
}
-type Alloc map[common.Address]core.GenesisAccount
+type Alloc map[common.Address]types.Account
func (g Alloc) OnRoot(common.Hash) {}
@@ -288,7 +287,7 @@ func (g Alloc) OnAccount(addr *common.Address, dumpAccount state.DumpAccount) {
storage[k] = common.HexToHash(v)
}
}
- genesisAccount := core.GenesisAccount{
+ genesisAccount := types.Account{
Code: dumpAccount.Code,
Storage: storage,
Balance: balance,
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index b22c3efd65..274b4ab625 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -38,8 +38,8 @@ import (
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
"github.com/urfave/cli/v2"
)
@@ -148,7 +148,7 @@ func runCmd(ctx *cli.Context) error {
}
db := rawdb.NewMemoryDatabase()
- triedb := trie.NewDatabase(db, &trie.Config{
+ triedb := triedb.NewDatabase(db, &triedb.Config{
Preimages: preimages,
HashDB: hashdb.Defaults,
})
diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go
index 6e751b630f..458d809ad8 100644
--- a/cmd/evm/staterunner.go
+++ b/cmd/evm/staterunner.go
@@ -25,7 +25,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/tests"
@@ -90,26 +89,27 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error {
if err != nil {
return err
}
- var tests map[string]tests.StateTest
- if err := json.Unmarshal(src, &tests); err != nil {
+ var testsByName map[string]tests.StateTest
+ if err := json.Unmarshal(src, &testsByName); err != nil {
return err
}
+
// Iterate over all the tests, run them and aggregate the results
- results := make([]StatetestResult, 0, len(tests))
- for key, test := range tests {
+ results := make([]StatetestResult, 0, len(testsByName))
+ for key, test := range testsByName {
for _, st := range test.Subtests() {
// Run the test and aggregate the result
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
- test.Run(st, cfg, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, statedb *state.StateDB) {
+ test.Run(st, cfg, false, rawdb.HashScheme, func(err error, tstate *tests.StateTestState) {
var root common.Hash
- if statedb != nil {
- root = statedb.IntermediateRoot(false)
+ if tstate.StateDB != nil {
+ root = tstate.StateDB.IntermediateRoot(false)
result.Root = &root
if jsonOut {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
}
if dump { // Dump any state to aid debugging
- cpy, _ := state.New(root, statedb.Database(), nil)
+ cpy, _ := state.New(root, tstate.StateDB.Database(), nil)
dump := cpy.RawDump(nil)
result.State = &dump
}
diff --git a/cmd/evm/transition-test.sh b/cmd/evm/transition-test.sh
index 8cc6aa41de..2ddda2d473 100644
--- a/cmd/evm/transition-test.sh
+++ b/cmd/evm/transition-test.sh
@@ -103,7 +103,7 @@ type Env struct {
CurrentTimestamp uint64 `json:"currentTimestamp"`
Withdrawals []*Withdrawal `json:"withdrawals"`
// optional
- CurrentDifficulty *big.Int `json:"currentDifficuly"`
+ CurrentDifficulty *big.Int `json:"currentDifficulty"`
CurrentRandom *big.Int `json:"currentRandom"`
CurrentBaseFee *big.Int `json:"currentBaseFee"`
ParentDifficulty *big.Int `json:"parentDifficulty"`
diff --git a/cmd/extradump/main.go b/cmd/extradump/main.go
index a19b6439ee..bb06735147 100644
--- a/cmd/extradump/main.go
+++ b/cmd/extradump/main.go
@@ -5,6 +5,7 @@ package main
import (
"bytes"
"encoding/hex"
+ "errors"
"flag"
"fmt"
"os"
@@ -78,7 +79,7 @@ func parseExtra(hexData string) (*Extra, error) {
// decode hex into bytes
data, err := hex.DecodeString(strings.TrimPrefix(hexData, "0x"))
if err != nil {
- return nil, fmt.Errorf("invalid hex data")
+ return nil, errors.New("invalid hex data")
}
// parse ExtraVanity and ExtraSeal
@@ -99,7 +100,7 @@ func parseExtra(hexData string) (*Extra, error) {
validatorNum := int(data[0])
validatorBytesTotalLength := validatorNumberSize + validatorNum*validatorBytesLength
if dataLength < validatorBytesTotalLength {
- return nil, fmt.Errorf("parse validators failed")
+ return nil, errors.New("parse validators failed")
}
extra.ValidatorSize = uint8(validatorNum)
data = data[validatorNumberSize:]
@@ -117,7 +118,7 @@ func parseExtra(hexData string) (*Extra, error) {
// parse Vote Attestation
if dataLength > 0 {
if err := rlp.Decode(bytes.NewReader(data), &extra.VoteAttestation); err != nil {
- return nil, fmt.Errorf("parse voteAttestation failed")
+ return nil, errors.New("parse voteAttestation failed")
}
if extra.ValidatorSize > 0 {
validatorsBitSet := bitset.From([]uint64{uint64(extra.VoteAddressSet)})
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 1b88aecb52..70f4bee426 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -42,13 +42,15 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
)
var (
@@ -148,6 +150,33 @@ Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
+ }
+ importHistoryCommand = &cli.Command{
+ Action: importHistory,
+ Name: "import-history",
+ Usage: "Import an Era archive",
+ ArgsUsage: "",
+ Flags: flags.Merge([]cli.Flag{
+ utils.TxLookupLimitFlag,
+ },
+ utils.DatabaseFlags,
+ utils.NetworkFlags,
+ ),
+ Description: `
+The import-history command will import blocks and their corresponding receipts
+from Era archives.
+`,
+ }
+ exportHistoryCommand = &cli.Command{
+ Action: exportHistory,
+ Name: "export-history",
+ Usage: "Export blockchain history to Era archives",
+ ArgsUsage: " ",
+ Flags: flags.Merge(utils.DatabaseFlags),
+ Description: `
+The export-history command will export blocks and their corresponding receipts
+into Era archives. Eras are typically packaged in steps of 8192 blocks.
+`,
}
importPreimagesCommand = &cli.Command{
Action: importPreimages,
@@ -237,6 +266,15 @@ func initGenesis(ctx *cli.Context) error {
}
defer chaindb.Close()
+ // if the trie data dir has been set, new trie db with a new state database
+ if ctx.IsSet(utils.SeparateDBFlag.Name) {
+ statediskdb, dbErr := stack.OpenDatabaseWithFreezer(name+"/state", 0, 0, "", "", false, false, false, false)
+ if dbErr != nil {
+ utils.Fatalf("Failed to open separate trie database: %v", dbErr)
+ }
+ chaindb.SetStateStore(statediskdb)
+ }
+
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
defer triedb.Close()
@@ -563,7 +601,95 @@ func exportChain(ctx *cli.Context) error {
}
err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
}
+ if err != nil {
+ utils.Fatalf("Export error: %v\n", err)
+ }
+ fmt.Printf("Export done in %v\n", time.Since(start))
+ return nil
+}
+
+func importHistory(ctx *cli.Context) error {
+ if ctx.Args().Len() != 1 {
+ utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
+ }
+
+ stack, _ := makeConfigNode(ctx)
+ defer stack.Close()
+
+ chain, db := utils.MakeChain(ctx, stack, false)
+ defer db.Close()
+
+ var (
+ start = time.Now()
+ dir = ctx.Args().Get(0)
+ network string
+ )
+
+ // Determine network.
+ if utils.IsNetworkPreset(ctx) {
+ switch {
+ case ctx.Bool(utils.BSCMainnetFlag.Name):
+ network = "mainnet"
+ case ctx.Bool(utils.ChapelFlag.Name):
+ network = "chapel"
+ }
+ } else {
+ // No network flag set, try to determine network based on files
+ // present in directory.
+ var networks []string
+ for _, n := range params.NetworkNames {
+ entries, err := era.ReadDir(dir, n)
+ if err != nil {
+ return fmt.Errorf("error reading %s: %w", dir, err)
+ }
+ if len(entries) > 0 {
+ networks = append(networks, n)
+ }
+ }
+ if len(networks) == 0 {
+ return fmt.Errorf("no era1 files found in %s", dir)
+ }
+ if len(networks) > 1 {
+ return fmt.Errorf("multiple networks found, use a network flag to specify desired network")
+ }
+ network = networks[0]
+ }
+
+ if err := utils.ImportHistory(chain, db, dir, network); err != nil {
+ return err
+ }
+ fmt.Printf("Import done in %v\n", time.Since(start))
+ return nil
+}
+
+// exportHistory exports chain history in Era archives at a specified
+// directory.
+func exportHistory(ctx *cli.Context) error {
+ if ctx.Args().Len() != 3 {
+ utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
+ }
+
+ stack, _ := makeConfigNode(ctx)
+ defer stack.Close()
+
+ chain, _ := utils.MakeChain(ctx, stack, true)
+ start := time.Now()
+ var (
+ dir = ctx.Args().Get(0)
+ first, ferr = strconv.ParseInt(ctx.Args().Get(1), 10, 64)
+ last, lerr = strconv.ParseInt(ctx.Args().Get(2), 10, 64)
+ )
+ if ferr != nil || lerr != nil {
+ utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
+ }
+ if first < 0 || last < 0 {
+ utils.Fatalf("Export error: block number must be greater than 0\n")
+ }
+ if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
+ utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
+ }
+ err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), uint64(era.MaxEra1Size))
if err != nil {
utils.Fatalf("Export error: %v\n", err)
}
@@ -600,7 +726,6 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
}
db := utils.MakeChainDatabase(ctx, stack, true, false)
- defer db.Close()
scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), db)
if err != nil {
return nil, nil, common.Hash{}, err
@@ -609,7 +734,7 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
fmt.Println("You are using geth dump in path mode, please use `geth dump-roothash` command to get all available blocks.")
}
- var header *types.Header
+ header := &types.Header{}
if ctx.NArg() == 1 {
arg := ctx.Args().First()
if hashish(arg) {
@@ -633,12 +758,12 @@ func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, eth
} else {
// Use latest
if scheme == rawdb.PathScheme {
- triedb := trie.NewDatabase(db, &trie.Config{PathDB: pathdb.ReadOnly})
+ triedb := triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.ReadOnly})
defer triedb.Close()
if stateRoot := triedb.Head(); stateRoot != (common.Hash{}) {
header.Root = stateRoot
} else {
- return nil, nil, common.Hash{}, fmt.Errorf("no top state root hash in path db")
+ return nil, nil, common.Hash{}, errors.New("no top state root hash in path db")
}
} else {
header = rawdb.ReadHeadHeader(db)
@@ -703,7 +828,7 @@ func dumpAllRootHashInPath(ctx *cli.Context) error {
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true, false)
defer db.Close()
- triedb := trie.NewDatabase(db, &trie.Config{PathDB: pathdb.ReadOnly})
+ triedb := triedb.NewDatabase(db, &triedb.Config{PathDB: pathdb.ReadOnly})
defer triedb.Close()
scheme, err := rawdb.ParseStateScheme(ctx.String(utils.StateSchemeFlag.Name), db)
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 94c2317e95..6f7250aee1 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -203,6 +203,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
v := ctx.Uint64(utils.OverrideFeynman.Name)
cfg.Eth.OverrideFeynman = &v
}
+ if ctx.IsSet(utils.SeparateDBFlag.Name) && !stack.IsSeparatedDB() {
+ utils.Fatalf("Failed to locate separate database subdirectory when separatedb parameter has been set")
+ }
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
// Create gauge with geth system and build information
diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go
index 9304ebcb3b..59c0e0015e 100644
--- a/cmd/geth/consolecmd_test.go
+++ b/cmd/geth/consolecmd_test.go
@@ -30,7 +30,7 @@ import (
)
const (
- ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 miner:1.0 net:1.0 parlia:1.0 rpc:1.0 txpool:1.0 web3:1.0"
+ ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 mev:1.0 miner:1.0 net:1.0 parlia:1.0 rpc:1.0 txpool:1.0 web3:1.0"
httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0"
)
diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go
index 677bae5d12..f6015eca22 100644
--- a/cmd/geth/dbcmd.go
+++ b/cmd/geth/dbcmd.go
@@ -18,6 +18,7 @@ package main
import (
"bytes"
+ "errors"
"fmt"
"math"
"os"
@@ -39,7 +40,8 @@ import (
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/olekukonko/tablewriter"
"github.com/urfave/cli/v2"
)
@@ -381,7 +383,6 @@ func inspectTrie(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true, false)
defer db.Close()
-
var headerBlockHash common.Hash
if ctx.NArg() >= 1 {
if ctx.Args().Get(0) == "latest" {
@@ -411,7 +412,7 @@ func inspectTrie(ctx *cli.Context) error {
if blockNumber != math.MaxUint64 {
headerBlockHash = rawdb.ReadCanonicalHash(db, blockNumber)
if headerBlockHash == (common.Hash{}) {
- return fmt.Errorf("ReadHeadBlockHash empry hash")
+ return errors.New("ReadHeadBlockHash empry hash")
}
blockHeader := rawdb.ReadHeader(db, headerBlockHash, blockNumber)
trieRootHash = blockHeader.Root
@@ -422,16 +423,16 @@ func inspectTrie(ctx *cli.Context) error {
fmt.Printf("ReadBlockHeader, root: %v, blocknum: %v\n", trieRootHash, blockNumber)
dbScheme := rawdb.ReadStateScheme(db)
- var config *trie.Config
+ var config *triedb.Config
if dbScheme == rawdb.PathScheme {
- config = &trie.Config{
+ config = &triedb.Config{
PathDB: pathdb.ReadOnly,
}
} else if dbScheme == rawdb.HashScheme {
- config = trie.HashDefaults
+ config = triedb.HashDefaults
}
- triedb := trie.NewDatabase(db, config)
+ triedb := triedb.NewDatabase(db, config)
theTrie, err := trie.New(trie.TrieID(trieRootHash), triedb)
if err != nil {
fmt.Printf("fail to new trie tree, err: %v, rootHash: %v\n", err, trieRootHash.String())
@@ -562,6 +563,11 @@ func dbStats(ctx *cli.Context) error {
defer db.Close()
showLeveldbStats(db)
+ if db.StateStore() != nil {
+ fmt.Println("show stats of state store")
+ showLeveldbStats(db.StateStore())
+ }
+
return nil
}
@@ -575,13 +581,31 @@ func dbCompact(ctx *cli.Context) error {
log.Info("Stats before compaction")
showLeveldbStats(db)
+ statediskdb := db.StateStore()
+ if statediskdb != nil {
+ fmt.Println("show stats of state store")
+ showLeveldbStats(statediskdb)
+ }
+
log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
- log.Info("Compact err", "error", err)
+ log.Error("Compact err", "error", err)
return err
}
+
+ if statediskdb != nil {
+ if err := statediskdb.Compact(nil, nil); err != nil {
+ log.Error("Compact err", "error", err)
+ return err
+ }
+ }
+
log.Info("Stats after compaction")
showLeveldbStats(db)
+ if statediskdb != nil {
+ fmt.Println("show stats of state store after compaction")
+ showLeveldbStats(statediskdb)
+ }
return nil
}
@@ -602,8 +626,17 @@ func dbGet(ctx *cli.Context) error {
return err
}
+ statediskdb := db.StateStore()
data, err := db.Get(key)
if err != nil {
+ // if separate trie db exist, try to get it from separate db
+ if statediskdb != nil {
+ statedata, dberr := statediskdb.Get(key)
+ if dberr == nil {
+ fmt.Printf("key %#x: %#x\n", key, statedata)
+ return nil
+ }
+ }
log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
return err
}
@@ -619,8 +652,14 @@ func dbTrieGet(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, false, false)
- defer db.Close()
+ var db ethdb.Database
+ chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
+ if chaindb.StateStore() != nil {
+ db = chaindb.StateStore()
+ } else {
+ db = chaindb
+ }
+ defer chaindb.Close()
scheme := ctx.String(utils.StateSchemeFlag.Name)
if scheme == "" {
@@ -685,8 +724,14 @@ func dbTrieDelete(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
- db := utils.MakeChainDatabase(ctx, stack, false, false)
- defer db.Close()
+ var db ethdb.Database
+ chaindb := utils.MakeChainDatabase(ctx, stack, true, false)
+ if chaindb.StateStore() != nil {
+ db = chaindb.StateStore()
+ } else {
+ db = chaindb
+ }
+ defer chaindb.Close()
scheme := ctx.String(utils.StateSchemeFlag.Name)
if scheme == "" {
@@ -1076,13 +1121,19 @@ func hbss2pbss(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, false, false)
db.Sync()
+ stateDiskDb := db.StateStore()
defer db.Close()
// convert hbss trie node to pbss trie node
- lastStateID := rawdb.ReadPersistentStateID(db)
+ var lastStateID uint64
+ if stateDiskDb != nil {
+ lastStateID = rawdb.ReadPersistentStateID(stateDiskDb)
+ } else {
+ lastStateID = rawdb.ReadPersistentStateID(db)
+ }
if lastStateID == 0 || force {
- config := trie.HashDefaults
- triedb := trie.NewDatabase(db, config)
+ config := triedb.HashDefaults
+ triedb := triedb.NewDatabase(db, config)
triedb.Cap(0)
log.Info("hbss2pbss triedb", "scheme", triedb.Scheme())
defer triedb.Close()
@@ -1102,7 +1153,7 @@ func hbss2pbss(ctx *cli.Context) error {
if *blockNumber != math.MaxUint64 {
headerBlockHash = rawdb.ReadCanonicalHash(db, *blockNumber)
if headerBlockHash == (common.Hash{}) {
- return fmt.Errorf("ReadHeadBlockHash empty hash")
+ return errors.New("ReadHeadBlockHash empty hash")
}
blockHeader := rawdb.ReadHeader(db, headerBlockHash, *blockNumber)
trieRootHash = blockHeader.Root
@@ -1110,7 +1161,7 @@ func hbss2pbss(ctx *cli.Context) error {
}
if (trieRootHash == common.Hash{}) {
log.Error("Empty root hash")
- return fmt.Errorf("Empty root hash.")
+ return errors.New("Empty root hash.")
}
id := trie.StateTrieID(trieRootHash)
@@ -1131,18 +1182,34 @@ func hbss2pbss(ctx *cli.Context) error {
}
// repair state ancient offset
- lastStateID = rawdb.ReadPersistentStateID(db)
+ if stateDiskDb != nil {
+ lastStateID = rawdb.ReadPersistentStateID(stateDiskDb)
+ } else {
+ lastStateID = rawdb.ReadPersistentStateID(db)
+ }
+
if lastStateID == 0 {
log.Error("Convert hbss to pbss trie node error. The last state id is still 0")
}
- ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
+
+ var ancient string
+ if db.StateStore() != nil {
+ dirName := filepath.Join(stack.ResolvePath("chaindata"), "state")
+ ancient = filepath.Join(dirName, "ancient")
+ } else {
+ ancient = stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
+ }
err = rawdb.ResetStateFreezerTableOffset(ancient, lastStateID)
if err != nil {
log.Error("Reset state freezer table offset failed", "error", err)
return err
}
// prune hbss trie node
- err = rawdb.PruneHashTrieNodeInDataBase(db)
+ if stateDiskDb != nil {
+ err = rawdb.PruneHashTrieNodeInDataBase(stateDiskDb)
+ } else {
+ err = rawdb.PruneHashTrieNodeInDataBase(db)
+ }
if err != nil {
log.Error("Prune Hash trie node in database failed", "error", err)
return err
diff --git a/cmd/geth/geth b/cmd/geth/geth
new file mode 100755
index 0000000000..9054c0c36a
Binary files /dev/null and b/cmd/geth/geth differ
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index e1242912f2..86fe2b6665 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -235,6 +235,8 @@ func init() {
initNetworkCommand,
importCommand,
exportCommand,
+ importHistoryCommand,
+ exportHistoryCommand,
importPreimagesCommand,
removedbCommand,
dumpCommand,
diff --git a/cmd/geth/pruneblock_test.go b/cmd/geth/pruneblock_test.go
index feb2f532e1..1d8f068a9d 100644
--- a/cmd/geth/pruneblock_test.go
+++ b/cmd/geth/pruneblock_test.go
@@ -42,7 +42,7 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
var (
@@ -51,7 +51,7 @@ var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
balance = big.NewInt(100000000000000000)
- gspec = &core.Genesis{Config: params.TestChainConfig, Alloc: core.GenesisAlloc{address: {Balance: balance}}, BaseFee: big.NewInt(params.InitialBaseFee)}
+ gspec = &core.Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: balance}}, BaseFee: big.NewInt(params.InitialBaseFee)}
signer = types.LatestSigner(gspec.Config)
config = &core.CacheConfig{
TrieCleanLimit: 256,
@@ -152,7 +152,7 @@ func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemai
}
defer db.Close()
- triedb := trie.NewDatabase(db, nil)
+ triedb := triedb.NewDatabase(db, nil)
defer triedb.Close()
genesis := gspec.MustCommit(db, triedb)
diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go
index 73453c0f91..48a1ae0d11 100644
--- a/cmd/geth/snapshot.go
+++ b/cmd/geth/snapshot.go
@@ -45,6 +45,7 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
cli "github.com/urfave/cli/v2"
)
@@ -244,7 +245,7 @@ func accessDb(ctx *cli.Context, stack *node.Node) (ethdb.Database, error) {
NoBuild: true,
AsyncBuild: false,
}
- snaptree, err := snapshot.New(snapconfig, chaindb, trie.NewDatabase(chaindb, nil), headBlock.Root(), TriesInMemory, false)
+ snaptree, err := snapshot.New(snapconfig, chaindb, triedb.NewDatabase(chaindb, nil), headBlock.Root(), TriesInMemory, false)
if err != nil {
log.Error("snaptree error", "err", err)
return nil, err // The relevant snapshot(s) might not exist
@@ -436,13 +437,15 @@ func pruneState(ctx *cli.Context) error {
chaindb := utils.MakeChainDatabase(ctx, stack, false, false)
defer chaindb.Close()
- if rawdb.ReadStateScheme(chaindb) != rawdb.HashScheme {
- log.Crit("Offline pruning is not required for path scheme")
- }
prunerconfig := pruner.Config{
Datadir: stack.ResolvePath(""),
BloomSize: ctx.Uint64(utils.BloomFilterSizeFlag.Name),
}
+
+ if rawdb.ReadStateScheme(chaindb) != rawdb.HashScheme {
+ log.Crit("Offline pruning is not required for path scheme")
+ }
+
pruner, err := pruner.NewPruner(chaindb, prunerconfig, ctx.Uint64(utils.TriesInMemoryFlag.Name))
if err != nil {
log.Error("Failed to open snapshot tree", "err", err)
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
index 8b571be1ef..4b57164665 100644
--- a/cmd/utils/cmd.go
+++ b/cmd/utils/cmd.go
@@ -19,12 +19,15 @@ package utils
import (
"bufio"
+ "bytes"
"compress/gzip"
+ "crypto/sha256"
"errors"
"fmt"
"io"
"os"
"os/signal"
+ "path"
"runtime"
"strings"
"syscall"
@@ -39,8 +42,10 @@ import (
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/debug"
+ "github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/urfave/cli/v2"
)
@@ -228,6 +233,105 @@ func ImportChain(chain *core.BlockChain, fn string) error {
return nil
}
+func readList(filename string) ([]string, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return strings.Split(string(b), "\n"), nil
+}
+
+// ImportHistory imports Era1 files containing historical block information,
+// starting from genesis.
+func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, network string) error {
+ if chain.CurrentSnapBlock().Number.BitLen() != 0 {
+ return fmt.Errorf("history import only supported when starting from genesis")
+ }
+ entries, err := era.ReadDir(dir, network)
+ if err != nil {
+ return fmt.Errorf("error reading %s: %w", dir, err)
+ }
+ checksums, err := readList(path.Join(dir, "checksums.txt"))
+ if err != nil {
+ return fmt.Errorf("unable to read checksums.txt: %w", err)
+ }
+ if len(checksums) != len(entries) {
+ return fmt.Errorf("expected equal number of checksums and entries, have: %d checksums, %d entries", len(checksums), len(entries))
+ }
+ var (
+ start = time.Now()
+ reported = time.Now()
+ imported = 0
+ forker = core.NewForkChoice(chain, nil)
+ h = sha256.New()
+ buf = bytes.NewBuffer(nil)
+ )
+ for i, filename := range entries {
+ err := func() error {
+ f, err := os.Open(path.Join(dir, filename))
+ if err != nil {
+ return fmt.Errorf("unable to open era: %w", err)
+ }
+ defer f.Close()
+
+ // Validate checksum.
+ if _, err := io.Copy(h, f); err != nil {
+ return fmt.Errorf("unable to recalculate checksum: %w", err)
+ }
+ if have, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; have != want {
+ return fmt.Errorf("checksum mismatch: have %s, want %s", have, want)
+ }
+ h.Reset()
+ buf.Reset()
+
+ // Import all block data from Era1.
+ e, err := era.From(f)
+ if err != nil {
+ return fmt.Errorf("error opening era: %w", err)
+ }
+ it, err := era.NewIterator(e)
+ if err != nil {
+ return fmt.Errorf("error making era reader: %w", err)
+ }
+ for it.Next() {
+ block, err := it.Block()
+ if err != nil {
+ return fmt.Errorf("error reading block %d: %w", it.Number(), err)
+ }
+ if block.Number().BitLen() == 0 {
+ continue // skip genesis
+ }
+ receipts, err := it.Receipts()
+ if err != nil {
+ return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
+ }
+ if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start, forker); err != nil {
+ return fmt.Errorf("error inserting header %d: %w", it.Number(), err)
+ } else if status != core.CanonStatTy {
+ return fmt.Errorf("error inserting header %d, not canon: %v", it.Number(), status)
+ }
+ if _, err := chain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{receipts}, 2^64-1); err != nil {
+ return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
+ }
+ imported += 1
+
+ // Give the user some feedback that something is happening.
+ if time.Since(reported) >= 8*time.Second {
+ log.Info("Importing Era files", "head", it.Number(), "imported", imported, "elapsed", common.PrettyDuration(time.Since(start)))
+ imported = 0
+ reported = time.Now()
+ }
+ }
+ return nil
+ }()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
head := chain.CurrentBlock()
for i, block := range blocks {
@@ -297,6 +401,93 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
return nil
}
+// ExportHistory exports blockchain history into the specified directory,
+// following the Era format.
+func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) error {
+ log.Info("Exporting blockchain history", "dir", dir)
+ if head := bc.CurrentBlock().Number.Uint64(); head < last {
+ log.Warn("Last block beyond head, setting last = head", "head", head, "last", last)
+ last = head
+ }
+ network := "unknown"
+ if name, ok := params.NetworkNames[bc.Config().ChainID.String()]; ok {
+ network = name
+ }
+ if err := os.MkdirAll(dir, os.ModePerm); err != nil {
+ return fmt.Errorf("error creating output directory: %w", err)
+ }
+ var (
+ start = time.Now()
+ reported = time.Now()
+ h = sha256.New()
+ buf = bytes.NewBuffer(nil)
+ checksums []string
+ )
+ for i := first; i <= last; i += step {
+ err := func() error {
+ filename := path.Join(dir, era.Filename(network, int(i/step), common.Hash{}))
+ f, err := os.Create(filename)
+ if err != nil {
+ return fmt.Errorf("could not create era file: %w", err)
+ }
+ defer f.Close()
+
+ w := era.NewBuilder(f)
+ for j := uint64(0); j < step && j <= last-i; j++ {
+ var (
+ n = i + j
+ block = bc.GetBlockByNumber(n)
+ )
+ if block == nil {
+ return fmt.Errorf("export failed on #%d: not found", n)
+ }
+ receipts := bc.GetReceiptsByHash(block.Hash())
+ if receipts == nil {
+ return fmt.Errorf("export failed on #%d: receipts not found", n)
+ }
+ td := bc.GetTd(block.Hash(), block.NumberU64())
+ if td == nil {
+ return fmt.Errorf("export failed on #%d: total difficulty not found", n)
+ }
+ if err := w.Add(block, receipts, td); err != nil {
+ return err
+ }
+ }
+ root, err := w.Finalize()
+ if err != nil {
+ return fmt.Errorf("export failed to finalize %d: %w", step/i, err)
+ }
+ // Set correct filename with root.
+ os.Rename(filename, path.Join(dir, era.Filename(network, int(i/step), root)))
+
+ // Compute checksum of entire Era1.
+ if _, err := f.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ if _, err := io.Copy(h, f); err != nil {
+ return fmt.Errorf("unable to calculate checksum: %w", err)
+ }
+ checksums = append(checksums, common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex())
+ h.Reset()
+ buf.Reset()
+ return nil
+ }()
+ if err != nil {
+ return err
+ }
+ if time.Since(reported) >= 8*time.Second {
+ log.Info("Exporting blocks", "exported", i, "elapsed", common.PrettyDuration(time.Since(start)))
+ reported = time.Now()
+ }
+ }
+
+ os.WriteFile(path.Join(dir, "checksums.txt"), []byte(strings.Join(checksums, "\n")), os.ModePerm)
+
+ log.Info("Exported blockchain to", "dir", dir)
+
+ return nil
+}
+
// ImportPreimages imports a batch of exported hash preimages into the database.
// It's a part of the deprecated functionality, should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 3514734c90..18c0167eee 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -69,9 +69,9 @@ import (
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/fatih/structs"
pcsclite "github.com/gballet/go-libpcsclite"
gopsutil "github.com/shirou/gopsutil/mem"
@@ -93,6 +93,12 @@ var (
Value: flags.DirectoryString(node.DefaultDataDir()),
Category: flags.EthCategory,
}
+ SeparateDBFlag = &cli.BoolFlag{
+ Name: "separatedb",
+ Usage: "Enable a separated trie database, it will be created within a subdirectory called state, " +
+ "Users can copy this state directory to another directory or disk, and then create a symbolic link to the state directory under the chaindata",
+ Category: flags.EthCategory,
+ }
DirectBroadcastFlag = &cli.BoolFlag{
Name: "directbroadcast",
Usage: "Enable directly broadcast mined block to all peers",
@@ -1112,6 +1118,7 @@ var (
DBEngineFlag,
StateSchemeFlag,
HttpHeaderFlag,
+ SeparateDBFlag,
}
)
@@ -2314,6 +2321,11 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFree
chainDb, err = stack.OpenDatabase("lightchaindata", cache, handles, "", readonly)
default:
chainDb, err = stack.OpenDatabaseWithFreezer("chaindata", cache, handles, ctx.String(AncientFlag.Name), "", readonly, disableFreeze, false, false)
+ // set the separate state database
+ if stack.IsSeparatedDB() && err == nil {
+ stateDiskDb := MakeStateDataBase(ctx, stack, readonly, false)
+ chainDb.SetStateStore(stateDiskDb)
+ }
}
if err != nil {
Fatalf("Could not open database: %v", err)
@@ -2321,6 +2333,17 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFree
return chainDb
}
+// MakeStateDataBase open a separate state database using the flags passed to the client and will hard crash if it fails.
+func MakeStateDataBase(ctx *cli.Context, stack *node.Node, readonly, disableFreeze bool) ethdb.Database {
+ cache := ctx.Int(CacheFlag.Name) * ctx.Int(CacheDatabaseFlag.Name) / 100
+ handles := MakeDatabaseHandles(ctx.Int(FDLimitFlag.Name)) / 2
+ statediskdb, err := stack.OpenDatabaseWithFreezer("chaindata/state", cache, handles, "", "", readonly, disableFreeze, false, false)
+ if err != nil {
+ Fatalf("Failed to open separate trie database: %v", err)
+ }
+ return statediskdb
+}
+
// tryMakeReadOnlyDatabase try to open the chain database in read-only mode,
// or fallback to write mode if the database is not initialized.
//
@@ -2463,8 +2486,8 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
}
// MakeTrieDatabase constructs a trie database based on the configured scheme.
-func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *trie.Database {
- config := &trie.Config{
+func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool, isVerkle bool) *triedb.Database {
+ config := &triedb.Config{
Preimages: preimage,
IsVerkle: isVerkle,
}
@@ -2477,14 +2500,14 @@ func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, read
// ignore the parameter silently. TODO(rjl493456442)
// please config it if read mode is implemented.
config.HashDB = hashdb.Defaults
- return trie.NewDatabase(disk, config)
+ return triedb.NewDatabase(disk, config)
}
if readOnly {
config.PathDB = pathdb.ReadOnly
} else {
config.PathDB = pathdb.Defaults
}
- return trie.NewDatabase(disk, config)
+ return triedb.NewDatabase(disk, config)
}
// ParseCLIAndConfigStateScheme parses state scheme in CLI and config.
diff --git a/cmd/utils/history_test.go b/cmd/utils/history_test.go
new file mode 100644
index 0000000000..45a9da6265
--- /dev/null
+++ b/cmd/utils/history_test.go
@@ -0,0 +1,185 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see .
+
+package utils
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "io"
+ "math/big"
+ "os"
+ "path"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/internal/era"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
+)
+
+var (
+ count uint64 = 128
+ step uint64 = 16
+)
+
+func TestHistoryImportAndExport(t *testing.T) {
+ var (
+ key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ address = crypto.PubkeyToAddress(key.PublicKey)
+ genesis = &core.Genesis{
+ Config: params.TestChainConfig,
+ Alloc: types.GenesisAlloc{address: {Balance: big.NewInt(1000000000000000000)}},
+ }
+ signer = types.LatestSigner(genesis.Config)
+ )
+
+ // Generate chain.
+ db, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), int(count), func(i int, g *core.BlockGen) {
+ if i == 0 {
+ return
+ }
+ tx, err := types.SignNewTx(key, signer, &types.DynamicFeeTx{
+ ChainID: genesis.Config.ChainID,
+ Nonce: uint64(i - 1),
+ GasTipCap: common.Big0,
+ GasFeeCap: g.PrevBlock(0).BaseFee(),
+ Gas: 50000,
+ To: &common.Address{0xaa},
+ Value: big.NewInt(int64(i)),
+ Data: nil,
+ AccessList: nil,
+ })
+ if err != nil {
+ t.Fatalf("error creating tx: %v", err)
+ }
+ g.AddTx(tx)
+ })
+
+ // Initialize BlockChain.
+ chain, err := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ if err != nil {
+ t.Fatalf("unable to initialize chain: %v", err)
+ }
+ if _, err := chain.InsertChain(blocks); err != nil {
+ t.Fatalf("error insterting chain: %v", err)
+ }
+
+ // Make temp directory for era files.
+ dir, err := os.MkdirTemp("", "history-export-test")
+ if err != nil {
+ t.Fatalf("error creating temp test directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ // Export history to temp directory.
+ if err := ExportHistory(chain, dir, 0, count, step); err != nil {
+ t.Fatalf("error exporting history: %v", err)
+ }
+
+ // Read checksums.
+ b, err := os.ReadFile(path.Join(dir, "checksums.txt"))
+ if err != nil {
+ t.Fatalf("failed to read checksums: %v", err)
+ }
+ checksums := strings.Split(string(b), "\n")
+
+ // Verify each Era.
+ entries, _ := era.ReadDir(dir, "mainnet")
+ for i, filename := range entries {
+ func() {
+ f, err := os.Open(path.Join(dir, filename))
+ if err != nil {
+ t.Fatalf("error opening era file: %v", err)
+ }
+ var (
+ h = sha256.New()
+ buf = bytes.NewBuffer(nil)
+ )
+ if _, err := io.Copy(h, f); err != nil {
+ t.Fatalf("unable to recalculate checksum: %v", err)
+ }
+ if got, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; got != want {
+ t.Fatalf("checksum %d does not match: got %s, want %s", i, got, want)
+ }
+ e, err := era.From(f)
+ if err != nil {
+ t.Fatalf("error opening era: %v", err)
+ }
+ defer e.Close()
+ it, err := era.NewIterator(e)
+ if err != nil {
+ t.Fatalf("error making era reader: %v", err)
+ }
+ for j := 0; it.Next(); j++ {
+ n := i*int(step) + j
+ if it.Error() != nil {
+ t.Fatalf("error reading block entry %d: %v", n, it.Error())
+ }
+ block, receipts, err := it.BlockAndReceipts()
+ if err != nil {
+ t.Fatalf("error reading block entry %d: %v", n, err)
+ }
+ want := chain.GetBlockByNumber(uint64(n))
+ if want, got := uint64(n), block.NumberU64(); want != got {
+ t.Fatalf("blocks out of order: want %d, got %d", want, got)
+ }
+ if want.Hash() != block.Hash() {
+ t.Fatalf("block hash mismatch %d: want %s, got %s", n, want.Hash().Hex(), block.Hash().Hex())
+ }
+ if got := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); got != want.TxHash() {
+ t.Fatalf("tx hash %d mismatch: want %s, got %s", n, want.TxHash(), got)
+ }
+ if got := types.CalcUncleHash(block.Uncles()); got != want.UncleHash() {
+ t.Fatalf("uncle hash %d mismatch: want %s, got %s", n, want.UncleHash(), got)
+ }
+ if got := types.DeriveSha(receipts, trie.NewStackTrie(nil)); got != want.ReceiptHash() {
+ t.Fatalf("receipt root %d mismatch: want %s, got %s", n, want.ReceiptHash(), got)
+ }
+ }
+ }()
+ }
+
+ // Now import Era.
+ freezer := t.TempDir()
+ db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false)
+ if err != nil {
+ panic(err)
+ }
+ t.Cleanup(func() {
+ db2.Close()
+ })
+
+ genesis.MustCommit(db2, triedb.NewDatabase(db, triedb.HashDefaults))
+ imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
+ if err != nil {
+ t.Fatalf("unable to initialize chain: %v", err)
+ }
+ if err := ImportHistory(imported, db2, dir, "mainnet"); err != nil {
+ t.Fatalf("failed to import chain: %v", err)
+ }
+ if have, want := imported.CurrentHeader(), chain.CurrentHeader(); have.Hash() != want.Hash() {
+ t.Fatalf("imported chain does not match expected, have (%d, %s) want (%d, %s)", have.Number, have.Hash(), want.Number, want.Hash())
+ }
+}
diff --git a/common/bidutil/bidutil.go b/common/bidutil/bidutil.go
new file mode 100644
index 0000000000..d2735808c6
--- /dev/null
+++ b/common/bidutil/bidutil.go
@@ -0,0 +1,23 @@
+package bidutil
+
+import (
+ "time"
+
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// BidBetterBefore returns the time when the next bid better be received, considering the delay and bid simulation.
+// BidBetterBefore is earlier than BidMustBefore.
+func BidBetterBefore(parentHeader *types.Header, blockPeriod uint64, delayLeftOver, simulationLeftOver time.Duration) time.Time {
+ nextHeaderTime := BidMustBefore(parentHeader, blockPeriod, delayLeftOver)
+ nextHeaderTime = nextHeaderTime.Add(-simulationLeftOver)
+ return nextHeaderTime
+}
+
+// BidMustBefore returns the time when the next bid must be received,
+// only considering the consensus delay but not bid simulation duration.
+func BidMustBefore(parentHeader *types.Header, blockPeriod uint64, delayLeftOver time.Duration) time.Time {
+ nextHeaderTime := time.Unix(int64(parentHeader.Time+blockPeriod), 0)
+ nextHeaderTime = nextHeaderTime.Add(-delayLeftOver)
+ return nextHeaderTime
+}
diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index 722f4b0188..9e1c39b99a 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -333,6 +333,11 @@ func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers [
return abort, results
}
+// NextInTurnValidator returns the next in-turn validator for the given header.
+func (beacon *Beacon) NextInTurnValidator(chain consensus.ChainHeaderReader, header *types.Header) (common.Address, error) {
+ return common.Address{}, errors.New("not implemented")
+}
+
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the beacon protocol. The changes are done inline.
func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index fb1fa1b121..bad64b24af 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -511,6 +511,11 @@ func (c *Clique) verifySeal(snap *Snapshot, header *types.Header, parents []*typ
return nil
}
+// NextInTurnValidator returns the next in-turn validator for the given header.
+func (c *Clique) NextInTurnValidator(chain consensus.ChainHeaderReader, header *types.Header) (common.Address, error) {
+ return common.Address{}, errors.New("not implemented")
+}
+
// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go
index 3f52fa541c..f479ac6169 100644
--- a/consensus/clique/clique_test.go
+++ b/consensus/clique/clique_test.go
@@ -48,7 +48,7 @@ func TestReimportMirroredState(t *testing.T) {
genspec := &core.Genesis{
Config: params.AllCliqueProtocolChanges,
ExtraData: make([]byte, extraVanity+common.AddressLength+extraSeal),
- Alloc: map[common.Address]core.GenesisAccount{
+ Alloc: map[common.Address]types.Account{
addr: {Balance: big.NewInt(10000000000000000)},
},
BaseFee: big.NewInt(params.InitialBaseFee),
diff --git a/consensus/consensus.go b/consensus/consensus.go
index cb5f1841ae..7b7648878b 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -94,6 +94,9 @@ type Engine interface {
// rules of a given engine.
VerifyUncles(chain ChainReader, block *types.Block) error
+ // NextInTurnValidator returns the next in-turn validator for the given header.
+ NextInTurnValidator(chain ChainHeaderReader, header *types.Header) (common.Address, error)
+
// Prepare initializes the consensus fields of a block header according to the
// rules of a particular engine. The changes are executed inline.
Prepare(chain ChainHeaderReader, header *types.Header) error
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index 8b39b27dc1..db730fab29 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -489,6 +489,11 @@ var FrontierDifficultyCalculator = calcDifficultyFrontier
var HomesteadDifficultyCalculator = calcDifficultyHomestead
var DynamicDifficultyCalculator = makeDifficultyCalculator
+// NextInTurnValidator returns the next in-turn validator for the given header.
+func (ethash *Ethash) NextInTurnValidator(chain consensus.ChainHeaderReader, header *types.Header) (common.Address, error) {
+ return common.Address{}, errors.New("not implemented")
+}
+
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the ethash protocol. The changes are done inline.
func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
diff --git a/consensus/parlia/feynmanfork.go b/consensus/parlia/feynmanfork.go
index c6a958e215..32f9951d85 100644
--- a/consensus/parlia/feynmanfork.go
+++ b/consensus/parlia/feynmanfork.go
@@ -3,7 +3,7 @@ package parlia
import (
"container/heap"
"context"
- "fmt"
+ "errors"
"math"
"math/big"
@@ -159,7 +159,7 @@ func (p *Parlia) getValidatorElectionInfo(blockNr rpc.BlockNumberOrHash) ([]Vali
return nil, err
}
if totalLength.Int64() != int64(len(validators)) || totalLength.Int64() != int64(len(votingPowers)) || totalLength.Int64() != int64(len(voteAddrs)) {
- return nil, fmt.Errorf("validator length not match")
+ return nil, errors.New("validator length not match")
}
validatorItems := make([]ValidatorItem, len(validators))
diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go
index 78f7587810..edc797aa8e 100644
--- a/consensus/parlia/parlia.go
+++ b/consensus/parlia/parlia.go
@@ -435,7 +435,7 @@ func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header
return nil
}
if attestation.Data == nil {
- return fmt.Errorf("invalid attestation, vote data is nil")
+ return errors.New("invalid attestation, vote data is nil")
}
if len(attestation.Extra) > types.MaxAttestationExtraLength {
return fmt.Errorf("invalid attestation, too large extra length: %d", len(attestation.Extra))
@@ -464,7 +464,7 @@ func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header
}
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, headers)
if err != nil {
- return fmt.Errorf("unexpected error when getting the highest justified number and hash")
+ return errors.New("unexpected error when getting the highest justified number and hash")
}
if sourceNumber != justifiedBlockNumber || sourceHash != justifiedBlockHash {
return fmt.Errorf("invalid attestation, source mismatch, expected block: %d, hash: %s; real block: %d, hash: %s",
@@ -486,7 +486,7 @@ func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header
validators := snap.validators()
validatorsBitSet := bitset.From([]uint64{uint64(attestation.VoteAddressSet)})
if validatorsBitSet.Count() > uint(len(validators)) {
- return fmt.Errorf("invalid attestation, vote number larger than validators number")
+ return errors.New("invalid attestation, vote number larger than validators number")
}
votedAddrs := make([]bls.PublicKey, 0, validatorsBitSet.Count())
for index, val := range validators {
@@ -503,7 +503,7 @@ func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header
// The valid voted validators should be no less than 2/3 validators.
if len(votedAddrs) < cmath.CeilDiv(len(snap.Validators)*2, 3) {
- return fmt.Errorf("invalid attestation, not enough validators voted")
+ return errors.New("invalid attestation, not enough validators voted")
}
// Verify the aggregated signature.
@@ -512,7 +512,7 @@ func (p *Parlia) verifyVoteAttestation(chain consensus.ChainHeaderReader, header
return fmt.Errorf("BLS signature converts failed: %v", err)
}
if !aggSig.FastAggregateVerify(votedAddrs, attestation.Data.Hash()) {
- return fmt.Errorf("invalid attestation, signature verify failed")
+ return errors.New("invalid attestation, signature verify failed")
}
return nil
@@ -605,18 +605,6 @@ func (p *Parlia) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
}
}
- if !cancun && header.ExcessBlobGas != nil {
- return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
- }
- if !cancun && header.BlobGasUsed != nil {
- return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed)
- }
- if cancun {
- if err := eip4844.VerifyEIP4844Header(parent, header); err != nil {
- return err
- }
- }
-
// All basic checks passed, verify cascading fields
return p.verifyCascadingFields(chain, header, parents)
}
@@ -904,7 +892,7 @@ func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, head
// Prepare vote data
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, []*types.Header{parent})
if err != nil {
- return fmt.Errorf("unexpected error when getting the highest justified number and hash")
+ return errors.New("unexpected error when getting the highest justified number and hash")
}
attestation := &types.VoteAttestation{
Data: &types.VoteData{
@@ -941,7 +929,7 @@ func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, head
validatorsBitSet := bitset.From([]uint64{uint64(attestation.VoteAddressSet)})
if validatorsBitSet.Count() < uint(len(signatures)) {
log.Warn(fmt.Sprintf("assembleVoteAttestation, check VoteAddress Set failed, expected:%d, real:%d", len(signatures), validatorsBitSet.Count()))
- return fmt.Errorf("invalid attestation, check VoteAddress Set failed")
+ return errors.New("invalid attestation, check VoteAddress Set failed")
}
// Append attestation to header extra field.
@@ -960,6 +948,16 @@ func (p *Parlia) assembleVoteAttestation(chain consensus.ChainHeaderReader, head
return nil
}
+// NextInTurnValidator returns the next in-turn validator for the given header.
+func (p *Parlia) NextInTurnValidator(chain consensus.ChainHeaderReader, header *types.Header) (common.Address, error) {
+ snap, err := p.snapshot(chain, header.Number.Uint64(), header.Hash(), nil)
+ if err != nil {
+ return common.Address{}, err
+ }
+
+ return snap.inturnValidator(), nil
+}
+
// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
func (p *Parlia) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
@@ -1344,27 +1342,27 @@ func (p *Parlia) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteE
header := chain.GetHeaderByHash(targetHash)
if header == nil {
log.Warn("BlockHeader at current voteBlockNumber is nil", "targetNumber", targetNumber, "targetHash", targetHash)
- return fmt.Errorf("BlockHeader at current voteBlockNumber is nil")
+ return errors.New("BlockHeader at current voteBlockNumber is nil")
}
if header.Number.Uint64() != targetNumber {
log.Warn("unexpected target number", "expect", header.Number.Uint64(), "real", targetNumber)
- return fmt.Errorf("target number mismatch")
+ return errors.New("target number mismatch")
}
justifiedBlockNumber, justifiedBlockHash, err := p.GetJustifiedNumberAndHash(chain, []*types.Header{header})
if err != nil {
log.Error("failed to get the highest justified number and hash", "headerNumber", header.Number, "headerHash", header.Hash())
- return fmt.Errorf("unexpected error when getting the highest justified number and hash")
+ return errors.New("unexpected error when getting the highest justified number and hash")
}
if vote.Data.SourceNumber != justifiedBlockNumber || vote.Data.SourceHash != justifiedBlockHash {
- return fmt.Errorf("vote source block mismatch")
+ return errors.New("vote source block mismatch")
}
number := header.Number.Uint64()
snap, err := p.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
log.Error("failed to get the snapshot from consensus", "error", err)
- return fmt.Errorf("failed to get the snapshot from consensus")
+ return errors.New("failed to get the snapshot from consensus")
}
validators := snap.Validators
@@ -1379,7 +1377,7 @@ func (p *Parlia) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteE
}
}
- return fmt.Errorf("vote verification failed")
+ return errors.New("vote verification failed")
}
// Authorize injects a private key into the consensus engine to mint new blocks
@@ -1838,7 +1836,7 @@ func (p *Parlia) applyTransaction(
// within the branch including `headers` and utilizing the latest element as the head.
func (p *Parlia) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, headers []*types.Header) (uint64, common.Hash, error) {
if chain == nil || len(headers) == 0 || headers[len(headers)-1] == nil {
- return 0, common.Hash{}, fmt.Errorf("illegal chain or header")
+ return 0, common.Hash{}, errors.New("illegal chain or header")
}
head := headers[len(headers)-1]
snap, err := p.snapshot(chain, head.Number.Uint64(), head.Hash(), headers)
diff --git a/consensus/parlia/snapshot.go b/consensus/parlia/snapshot.go
index ddfb1811fc..0da0929e7c 100644
--- a/consensus/parlia/snapshot.go
+++ b/consensus/parlia/snapshot.go
@@ -338,6 +338,13 @@ func (s *Snapshot) inturn(validator common.Address) bool {
return validators[offset] == validator
}
+// inturnValidator returns the in-turn validator for the next block height (s.Number + 1).
+func (s *Snapshot) inturnValidator() common.Address {
+ validators := s.validators()
+ offset := (s.Number + 1) % uint64(len(validators))
+ return validators[offset]
+}
+
func (s *Snapshot) enoughDistance(validator common.Address, header *types.Header) bool {
idx := s.indexOfVal(validator)
if idx < 0 {
diff --git a/core/bench_test.go b/core/bench_test.go
index 951ce2a08c..97713868a5 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -189,7 +189,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// generator function.
gspec := &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}},
+ Alloc: types.GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}},
}
_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), b.N, gen)
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index 614676c282..1ab82ea0be 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -106,7 +106,7 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
gspec = &Genesis{
Config: &config,
ExtraData: make([]byte, 32+common.AddressLength+crypto.SignatureLength),
- Alloc: map[common.Address]GenesisAccount{
+ Alloc: map[common.Address]types.Account{
addr: {Balance: big.NewInt(1)},
},
BaseFee: big.NewInt(params.InitialBaseFee),
diff --git a/core/blockchain.go b/core/blockchain.go
index 5141169986..62b1e56f60 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -53,9 +53,9 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
"golang.org/x/exp/slices"
)
@@ -170,8 +170,8 @@ type CacheConfig struct {
}
// triedbConfig derives the configures for trie database.
-func (c *CacheConfig) triedbConfig() *trie.Config {
- config := &trie.Config{
+func (c *CacheConfig) triedbConfig() *triedb.Config {
+ config := &triedb.Config{
Cache: c.TrieCleanLimit,
Preimages: c.Preimages,
NoTries: c.NoTries,
@@ -247,7 +247,7 @@ type BlockChain struct {
commitLock sync.Mutex // CommitLock is used to protect above field from being modified concurrently
lastWrite uint64 // Last block when the state was flushed
flushInterval atomic.Int64 // Time interval (processing time) after which to flush a state
- triedb *trie.Database // The database handler for maintaining trie nodes.
+ triedb *triedb.Database // The database handler for maintaining trie nodes.
stateCache state.Database // State database to reuse between imports (contains state cache)
triesInMemory uint64
txIndexer *txIndexer // Transaction indexer, might be nil if not enabled
@@ -325,7 +325,8 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
diffLayerChanCache, _ := exlru.New(diffLayerCacheLimit)
// Open trie database with provided config
- triedb := trie.NewDatabase(db, cacheConfig.triedbConfig())
+ triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig())
+
// Setup the genesis block, commit the provided genesis specification
// to database if the genesis block is not present yet, or load the
// stored one from database.
@@ -2478,6 +2479,12 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
// rewind the canonical chain to a lower point.
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
}
+ // Reset the tx lookup cache in case to clear stale txlookups.
+ // This is done before writing any new chain data to avoid the
+ // weird scenario that canonical chain is changed while the
+ // stale lookups are still cached.
+ bc.txLookupCache.Purge()
+
// Insert the new chain(except the head block(reverse order)),
// taking care of the proper incremental order.
for i := len(newChain) - 1; i >= 1; i-- {
@@ -2492,11 +2499,13 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
// Delete useless indexes right now which includes the non-canonical
// transaction indexes, canonical chain indexes which above the head.
- indexesBatch := bc.db.NewBatch()
- for _, tx := range types.HashDifference(deletedTxs, addedTxs) {
+ var (
+ indexesBatch = bc.db.NewBatch()
+ diffs = types.HashDifference(deletedTxs, addedTxs)
+ )
+ for _, tx := range diffs {
rawdb.DeleteTxLookupEntry(indexesBatch, tx)
}
-
// Delete all hash markers that are not part of the new canonical chain.
// Because the reorg function does not handle new chain head, all hash
// markers greater than or equal to new chain head should be deleted.
@@ -2952,7 +2961,7 @@ func (bc *BlockChain) GetTrustedDiffLayer(blockHash common.Hash) *types.DiffLaye
func CalculateDiffHash(d *types.DiffLayer) (common.Hash, error) {
if d == nil {
- return common.Hash{}, fmt.Errorf("nil diff layer")
+ return common.Hash{}, errors.New("nil diff layer")
}
diff := &types.ExtDiffLayer{
@@ -3006,7 +3015,7 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) {
bc.flushInterval.Store(int64(interval))
}
-// GetTrieFlushInterval gets the in-memory tries flush interval
+// GetTrieFlushInterval gets the in-memory tries flush interval
func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
return time.Duration(bc.flushInterval.Load())
}
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index b3cc434193..36cc1f5514 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -29,7 +29,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
// CurrentHeader retrieves the current head header of the canonical chain. The
@@ -432,10 +432,15 @@ func (bc *BlockChain) TxIndexProgress() (TxIndexProgress, error) {
}
// TrieDB retrieves the low level trie database used for data storage.
-func (bc *BlockChain) TrieDB() *trie.Database {
+func (bc *BlockChain) TrieDB() *triedb.Database {
return bc.triedb
}
+// HeaderChain returns the underlying header chain.
+func (bc *BlockChain) HeaderChain() *HeaderChain {
+ return bc.hc
+}
+
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go
index 83dd926504..217610c33a 100644
--- a/core/blockchain_sethead_test.go
+++ b/core/blockchain_sethead_test.go
@@ -34,9 +34,9 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
)
// rewindTest is a test case for chain rollback upon user request.
@@ -2034,13 +2034,13 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
}
// Reopen the trie database without persisting in-memory dirty nodes.
chain.triedb.Close()
- dbconfig := &trie.Config{}
+ dbconfig := &triedb.Config{}
if scheme == rawdb.PathScheme {
dbconfig.PathDB = pathdb.Defaults
} else {
dbconfig.HashDB = hashdb.Defaults
}
- chain.triedb = trie.NewDatabase(chain.db, dbconfig)
+ chain.triedb = triedb.NewDatabase(chain.db, dbconfig)
chain.stateCache = state.NewDatabaseWithNodeDB(chain.db, chain.triedb)
// Force run a freeze cycle
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index b337a47b78..e0b5a64f4b 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -922,7 +922,7 @@ func testFastVsFullChains(t *testing.T, scheme string) {
funds = big.NewInt(1000000000000000)
gspec = &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{address: {Balance: funds}},
+ Alloc: types.GenesisAlloc{address: {Balance: funds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
signer = types.LatestSigner(gspec.Config)
@@ -1055,7 +1055,7 @@ func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) {
funds = big.NewInt(1000000000000000)
gspec = &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{address: {Balance: funds}},
+ Alloc: types.GenesisAlloc{address: {Balance: funds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
)
@@ -1175,7 +1175,7 @@ func testChainTxReorgs(t *testing.T, scheme string) {
gspec = &Genesis{
Config: params.TestChainConfig,
GasLimit: 3141592,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
addr1: {Balance: big.NewInt(1000000000000000)},
addr2: {Balance: big.NewInt(1000000000000000)},
addr3: {Balance: big.NewInt(1000000000000000)},
@@ -1290,7 +1290,7 @@ func testLogReorgs(t *testing.T, scheme string) {
// this code generates a log
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
signer = types.LatestSigner(gspec.Config)
)
@@ -1347,7 +1347,7 @@ func testLogRebirth(t *testing.T, scheme string) {
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
signer = types.LatestSigner(gspec.Config)
engine = ethash.NewFaker()
blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil)
@@ -1429,7 +1429,7 @@ func testSideLogRebirth(t *testing.T, scheme string) {
var (
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
signer = types.LatestSigner(gspec.Config)
blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
)
@@ -1526,7 +1526,7 @@ func testReorgSideEvent(t *testing.T, scheme string) {
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
gspec = &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
+ Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
}
signer = types.LatestSigner(gspec.Config)
)
@@ -1669,7 +1669,7 @@ func testEIP155Transition(t *testing.T, scheme string) {
EIP155Block: big.NewInt(2),
HomesteadBlock: new(big.Int),
},
- Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
+ Alloc: types.GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
}
)
genDb, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, block *BlockGen) {
@@ -1784,7 +1784,7 @@ func testEIP161AccountRemoval(t *testing.T, scheme string) {
EIP150Block: new(big.Int),
EIP158Block: big.NewInt(2),
},
- Alloc: GenesisAlloc{address: {Balance: funds}},
+ Alloc: types.GenesisAlloc{address: {Balance: funds}},
}
)
_, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, block *BlockGen) {
@@ -2015,7 +2015,7 @@ func testBlockchainRecovery(t *testing.T, scheme string) {
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
funds = big.NewInt(1000000000)
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: funds}}}
)
height := uint64(1024)
_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
@@ -2220,7 +2220,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
gspec = &Genesis{
Config: &chainConfig,
- Alloc: GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
+ Alloc: types.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
signer = types.LatestSigner(gspec.Config)
@@ -2815,7 +2815,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
bankFunds = big.NewInt(100000000000000000)
gspec = &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
testBankAddress: {Balance: bankFunds},
common.HexToAddress("0xc0de"): {
Code: []byte{0x60, 0x01, 0x50},
@@ -2993,7 +2993,7 @@ func testDeleteCreateRevert(t *testing.T, scheme string) {
funds = big.NewInt(100000000000000000)
gspec = &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
// The address 0xAAAAA selfdestructs if called
aa: {
@@ -3117,7 +3117,7 @@ func testDeleteRecreateSlots(t *testing.T, scheme string) {
gspec := &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
// The address 0xAAAAA selfdestructs if called
aa: {
@@ -3203,7 +3203,7 @@ func testDeleteRecreateAccount(t *testing.T, scheme string) {
gspec := &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
// The address 0xAAAAA selfdestructs if called
aa: {
@@ -3324,7 +3324,7 @@ func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) {
t.Logf("Destination address: %x\n", aa)
gspec := &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
// The address 0xAAAAA selfdestructs if called
aa: {
@@ -3519,7 +3519,7 @@ func testInitThenFailCreateContract(t *testing.T, scheme string) {
gspec := &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
// The address aa has some funds
aa: {Balance: big.NewInt(100000)},
@@ -3603,7 +3603,7 @@ func testEIP2718TransitionWithConfig(t *testing.T, scheme string, config *params
funds = big.NewInt(1000000000000000)
gspec = &Genesis{
Config: config,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
// The address 0xAAAA sloads 0x00 and 0x01
aa: {
@@ -3688,7 +3688,7 @@ func testEIP1559Transition(t *testing.T, scheme string) {
config = *params.AllEthashProtocolChanges
gspec = &Genesis{
Config: &config,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
addr1: {Balance: funds},
addr2: {Balance: funds},
// The address 0xAAAA sloads 0x00 and 0x01
@@ -3829,7 +3829,7 @@ func testSetCanonical(t *testing.T, scheme string) {
funds = big.NewInt(100000000000000000)
gspec = &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{address: {Balance: funds}},
+ Alloc: types.GenesisAlloc{address: {Balance: funds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
signer = types.LatestSigner(gspec.Config)
@@ -3946,7 +3946,7 @@ func testCanonicalHashMarker(t *testing.T, scheme string) {
var (
gspec = &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{},
+ Alloc: types.GenesisAlloc{},
BaseFee: big.NewInt(params.InitialBaseFee),
}
engine = ethash.NewFaker()
@@ -4059,7 +4059,7 @@ func testCreateThenDelete(t *testing.T, config *params.ChainConfig) {
}...)
gspec := &Genesis{
Config: config,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
},
}
@@ -4145,7 +4145,7 @@ func TestDeleteThenCreate(t *testing.T) {
gspec := &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
},
}
@@ -4257,7 +4257,7 @@ func TestTransientStorageReset(t *testing.T) {
}...)
gspec := &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
},
}
@@ -4325,7 +4325,7 @@ func TestEIP3651(t *testing.T) {
config = *params.AllEthashProtocolChanges
gspec = &Genesis{
Config: &config,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
addr1: {Balance: funds},
addr2: {Balance: funds},
// The address 0xAAAA sloads 0x00 and 0x01
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 932addc396..263310929f 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -32,7 +32,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
"github.com/holiman/uint256"
)
@@ -84,7 +84,7 @@ func (b *BlockGen) SetDifficulty(diff *big.Int) {
b.header.Difficulty = diff
}
-// SetPos makes the header a PoS-header (0 difficulty)
+// SetPoS makes the header a PoS-header (0 difficulty)
func (b *BlockGen) SetPoS() {
b.header.Difficulty = new(big.Int)
}
@@ -313,7 +313,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
cm := newChainMaker(parent, config, engine)
- genblock := func(i int, parent *types.Block, triedb *trie.Database, statedb *state.StateDB) (*types.Block, types.Receipts) {
+ genblock := func(i int, parent *types.Block, triedb *triedb.Database, statedb *state.StateDB) (*types.Block, types.Receipts) {
b := &BlockGen{i: i, cm: cm, parent: parent, statedb: statedb, engine: engine}
b.header = cm.makeHeader(parent, statedb, b.engine)
@@ -370,7 +370,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
// Forcibly use hash-based state scheme for retaining all nodes in disk.
- triedb := trie.NewDatabase(db, trie.HashDefaults)
+ triedb := triedb.NewDatabase(db, triedb.HashDefaults)
defer triedb.Close()
for i := 0; i < n; i++ {
@@ -415,7 +415,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
// then generate chain on top.
func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) {
db := rawdb.NewMemoryDatabase()
- triedb := trie.NewDatabase(db, trie.HashDefaults)
+ triedb := triedb.NewDatabase(db, triedb.HashDefaults)
defer triedb.Close()
_, err := genesis.Commit(db, triedb)
if err != nil {
@@ -456,7 +456,9 @@ func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engi
excessBlobGas := eip4844.CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed)
header.ExcessBlobGas = &excessBlobGas
header.BlobGasUsed = new(uint64)
- header.ParentBeaconRoot = new(common.Hash)
+ if cm.config.Parlia == nil {
+ header.ParentBeaconRoot = new(common.Hash)
+ }
}
return header
}
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index 84148841f5..b46b898afb 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -31,7 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
func TestGeneratePOSChain(t *testing.T) {
@@ -46,7 +46,7 @@ func TestGeneratePOSChain(t *testing.T) {
asm4788 = common.Hex2Bytes("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500")
gspec = &Genesis{
Config: &config,
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
params.BeaconRootsStorageAddress: {Balance: common.Big0, Code: asm4788},
},
@@ -69,19 +69,19 @@ func TestGeneratePOSChain(t *testing.T) {
storage[common.Hash{0x01}] = common.Hash{0x01}
storage[common.Hash{0x02}] = common.Hash{0x02}
storage[common.Hash{0x03}] = common.HexToHash("0303")
- gspec.Alloc[aa] = GenesisAccount{
+ gspec.Alloc[aa] = types.Account{
Balance: common.Big1,
Nonce: 1,
Storage: storage,
Code: common.Hex2Bytes("6042"),
}
- gspec.Alloc[bb] = GenesisAccount{
+ gspec.Alloc[bb] = types.Account{
Balance: common.Big2,
Nonce: 1,
Storage: storage,
Code: common.Hex2Bytes("600154600354"),
}
- genesis := gspec.MustCommit(gendb, trie.NewDatabase(gendb, trie.HashDefaults))
+ genesis := gspec.MustCommit(gendb, triedb.NewDatabase(gendb, triedb.HashDefaults))
genchain, genreceipts := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) {
gen.SetParentBeaconRoot(common.Hash{byte(i + 1)})
@@ -202,9 +202,9 @@ func ExampleGenerateChain() {
// Ensure that key1 has some funds in the genesis block.
gspec := &Genesis{
Config: &params.ChainConfig{HomesteadBlock: new(big.Int)},
- Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
+ Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
}
- genesis := gspec.MustCommit(genDb, trie.NewDatabase(genDb, trie.HashDefaults))
+ genesis := gspec.MustCommit(genDb, triedb.NewDatabase(genDb, triedb.HashDefaults))
// This call generates a chain of 5 blocks. The function runs for
// each block and adds different features to gen based on the
diff --git a/core/eip3529tests/eip3529_test_util.go b/core/eip3529tests/eip3529_test_util.go
index c2d63f349d..d2448bb332 100644
--- a/core/eip3529tests/eip3529_test_util.go
+++ b/core/eip3529tests/eip3529_test_util.go
@@ -12,7 +12,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
func newGwei(n int64) *big.Int {
@@ -33,7 +33,7 @@ func TestGasUsage(t *testing.T, config *params.ChainConfig, engine consensus.Eng
balanceBefore = big.NewInt(1000000000000000)
gspec = &core.Genesis{
Config: config,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: balanceBefore},
aa: {
Code: bytecode,
@@ -43,7 +43,7 @@ func TestGasUsage(t *testing.T, config *params.ChainConfig, engine consensus.Eng
},
},
}
- genesis = gspec.MustCommit(db, trie.NewDatabase(db, nil))
+ genesis = gspec.MustCommit(db, triedb.NewDatabase(db, nil))
)
blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, 1, func(i int, b *core.BlockGen) {
@@ -62,7 +62,7 @@ func TestGasUsage(t *testing.T, config *params.ChainConfig, engine consensus.Eng
// Import the canonical chain
diskdb := rawdb.NewMemoryDatabase()
- gspec.MustCommit(diskdb, trie.NewDatabase(diskdb, nil))
+ gspec.MustCommit(diskdb, triedb.NewDatabase(diskdb, nil))
chain, err := core.NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go
index 0034e68d72..7bfe606913 100644
--- a/core/forkid/forkid_test.go
+++ b/core/forkid/forkid_test.go
@@ -74,8 +74,10 @@ func TestCreation(t *testing.T) {
{15049999, 0, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block
{15050000, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // First Gray Glacier block
{20000000, 1681338454, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 1681338455}}, // Last Gray Glacier block
- {20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}}, // First Shanghai block
- {30000000, 2000000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}}, // Future Shanghai block
+ {20000000, 1681338455, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // First Shanghai block
+ {30000000, 1710338134, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}}, // Last Shanghai block
+ {40000000, 1710338135, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}}, // First Cancun block
+ {50000000, 2000000000, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}}, // Future Cancun block
},
},
}
@@ -94,6 +96,7 @@ func TestValidation(t *testing.T) {
// Config that has not timestamp enabled
legacyConfig := *params.MainnetChainConfig
legacyConfig.ShanghaiTime = nil
+ legacyConfig.CancunTime = nil
tests := []struct {
config *params.ChainConfig
@@ -166,14 +169,10 @@ func TestValidation(t *testing.T) {
// at some future block 88888888, for itself, but past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
- //
- // TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
{&legacyConfig, 88888888, 0, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrLocalIncompatibleOrStale},
// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
// fork) at block 7279999, before Petersburg. Local is incompatible.
- //
- // TODO(karalabe): This testcase will fail once mainnet gets timestamped forks, make legacy chain config
{&legacyConfig, 7279999, 0, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale},
//------------------------------------
@@ -250,34 +249,25 @@ func TestValidation(t *testing.T) {
// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
// also Shanghai, but it's not yet aware of Cancun (e.g. non updated node before the fork).
// In this case we don't know if Cancun passed yet or not.
- //
- // TODO(karalabe): Enable this when Cancun is specced
- //{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, nil},
+ {params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, nil},
// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
// also Shanghai, and it's also aware of Cancun (e.g. updated node before the fork). We
// don't know if Cancun passed yet (will pass) or not.
- //
- // TODO(karalabe): Enable this when Cancun is specced and update next timestamp
- //{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+ {params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},
// Local is mainnet currently in Shanghai only (so it's aware of Cancun), remote announces
// also Shanghai, and it's also aware of some random fork (e.g. misconfigured Cancun). As
// neither forks passed at neither nodes, they may mismatch, but we still connect for now.
- //
- // TODO(karalabe): Enable this when Cancun is specced
- //{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0x71147644), Next: math.MaxUint64}, nil},
+ {params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(0xdce96c2d), Next: math.MaxUint64}, nil},
// Local is mainnet exactly on Cancun, remote announces Shanghai + knowledge about Cancun. Remote
// is simply out of sync, accept.
- //
- // TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
- // {params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+ {params.MainnetChainConfig, 21000000, 1710338135, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},
// Local is mainnet Cancun, remote announces Shanghai + knowledge about Cancun. Remote
// is simply out of sync, accept.
- // TODO(karalabe): Enable this when Cancun is specced, update local head and time, next timestamp
- //{params.MainnetChainConfig, 21123456, 1678123456, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, nil},
+ {params.MainnetChainConfig, 21123456, 1710338136, ID{Hash: checksumToBytes(0xdce96c2d), Next: 1710338135}, nil},
// Local is mainnet Prague, remote announces Shanghai + knowledge about Cancun. Remote
// is definitely out of sync. It may or may not need the Prague update, we don't know yet.
@@ -286,9 +276,7 @@ func TestValidation(t *testing.T) {
//{params.MainnetChainConfig, 0, 0, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}, nil},
// Local is mainnet Shanghai, remote announces Cancun. Local is out of sync, accept.
- //
- // TODO(karalabe): Enable this when Cancun is specced, update remote checksum
- //{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x00000000), Next: 0}, nil},
+ {params.MainnetChainConfig, 21000000, 1700000000, ID{Hash: checksumToBytes(0x9f3d2254), Next: 0}, nil},
// Local is mainnet Shanghai, remote announces Cancun, but is not aware of Prague. Local
// out of sync. Local also knows about a future fork, but that is uncertain yet.
@@ -298,9 +286,7 @@ func TestValidation(t *testing.T) {
// Local is mainnet Cancun. remote announces Shanghai but is not aware of further forks.
// Remote needs software update.
- //
- // TODO(karalabe): Enable this when Cancun is specced, update local head and time
- //{params.MainnetChainConfig, 21000000, 1678000000, ID{Hash: checksumToBytes(0x71147644), Next: 0}, ErrRemoteStale},
+ {params.MainnetChainConfig, 21000000, 1710338135, ID{Hash: checksumToBytes(0xdce96c2d), Next: 0}, ErrRemoteStale},
// Local is mainnet Shanghai, and isn't aware of more forks. Remote announces Shanghai +
// 0xffffffff. Local needs software update, reject.
@@ -308,24 +294,20 @@ func TestValidation(t *testing.T) {
// Local is mainnet Shanghai, and is aware of Cancun. Remote announces Cancun +
// 0xffffffff. Local needs software update, reject.
- //
- // TODO(karalabe): Enable this when Cancun is specced, update remote checksum
- //{params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x00000000, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
+ {params.MainnetChainConfig, 20000000, 1668000000, ID{Hash: checksumToBytes(checksumUpdate(0x9f3d2254, math.MaxUint64)), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Shanghai, remote is random Shanghai.
{params.MainnetChainConfig, 20000000, 1681338455, ID{Hash: checksumToBytes(0x12345678), Next: 0}, ErrLocalIncompatibleOrStale},
- // Local is mainnet Shanghai, far in the future. Remote announces Gopherium (non existing fork)
+ // Local is mainnet Cancun, far in the future. Remote announces Gopherium (non existing fork)
// at some future timestamp 8888888888, for itself, but past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
- {params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0xdce96c2d), Next: 8888888888}, ErrLocalIncompatibleOrStale},
+ {params.MainnetChainConfig, 88888888, 8888888888, ID{Hash: checksumToBytes(0x9f3d2254), Next: 8888888888}, ErrLocalIncompatibleOrStale},
// Local is mainnet Shanghai. Remote is also in Shanghai, but announces Gopherium (non existing
// fork) at timestamp 1668000000, before Cancun. Local is incompatible.
- //
- // TODO(karalabe): Enable this when Cancun is specced
- //{params.MainnetChainConfig, 20999999, 1677999999, ID{Hash: checksumToBytes(0x71147644), Next: 1678000000}, ErrLocalIncompatibleOrStale},
+ {params.MainnetChainConfig, 20999999, 1699999999, ID{Hash: checksumToBytes(0x71147644), Next: 1700000000}, ErrLocalIncompatibleOrStale},
}
genesis := core.DefaultGenesisBlock().ToBlock()
for i, tt := range tests {
diff --git a/core/gen_genesis.go b/core/gen_genesis.go
index 38614252a3..b8acf9df7c 100644
--- a/core/gen_genesis.go
+++ b/core/gen_genesis.go
@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
@@ -18,21 +19,21 @@ var _ = (*genesisSpecMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (g Genesis) MarshalJSON() ([]byte, error) {
type Genesis struct {
- Config *params.ChainConfig `json:"config"`
- Nonce math.HexOrDecimal64 `json:"nonce"`
- Timestamp math.HexOrDecimal64 `json:"timestamp"`
- ExtraData hexutil.Bytes `json:"extraData"`
- GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
- Mixhash common.Hash `json:"mixHash"`
- Coinbase common.Address `json:"coinbase"`
- Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
- Number math.HexOrDecimal64 `json:"number"`
- GasUsed math.HexOrDecimal64 `json:"gasUsed"`
- ParentHash common.Hash `json:"parentHash"`
- BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
- ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
- BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
+ Config *params.ChainConfig `json:"config"`
+ Nonce math.HexOrDecimal64 `json:"nonce"`
+ Timestamp math.HexOrDecimal64 `json:"timestamp"`
+ ExtraData hexutil.Bytes `json:"extraData"`
+ GasLimit math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
+ Mixhash common.Hash `json:"mixHash"`
+ Coinbase common.Address `json:"coinbase"`
+ Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
+ Number math.HexOrDecimal64 `json:"number"`
+ GasUsed math.HexOrDecimal64 `json:"gasUsed"`
+ ParentHash common.Hash `json:"parentHash"`
+ BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
}
var enc Genesis
enc.Config = g.Config
@@ -44,7 +45,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
enc.Mixhash = g.Mixhash
enc.Coinbase = g.Coinbase
if g.Alloc != nil {
- enc.Alloc = make(map[common.UnprefixedAddress]GenesisAccount, len(g.Alloc))
+ enc.Alloc = make(map[common.UnprefixedAddress]types.Account, len(g.Alloc))
for k, v := range g.Alloc {
enc.Alloc[common.UnprefixedAddress(k)] = v
}
@@ -61,21 +62,21 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (g *Genesis) UnmarshalJSON(input []byte) error {
type Genesis struct {
- Config *params.ChainConfig `json:"config"`
- Nonce *math.HexOrDecimal64 `json:"nonce"`
- Timestamp *math.HexOrDecimal64 `json:"timestamp"`
- ExtraData *hexutil.Bytes `json:"extraData"`
- GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
- Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
- Mixhash *common.Hash `json:"mixHash"`
- Coinbase *common.Address `json:"coinbase"`
- Alloc map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
- Number *math.HexOrDecimal64 `json:"number"`
- GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
- ParentHash *common.Hash `json:"parentHash"`
- BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
- ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
- BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
+ Config *params.ChainConfig `json:"config"`
+ Nonce *math.HexOrDecimal64 `json:"nonce"`
+ Timestamp *math.HexOrDecimal64 `json:"timestamp"`
+ ExtraData *hexutil.Bytes `json:"extraData"`
+ GasLimit *math.HexOrDecimal64 `json:"gasLimit" gencodec:"required"`
+ Difficulty *math.HexOrDecimal256 `json:"difficulty" gencodec:"required"`
+ Mixhash *common.Hash `json:"mixHash"`
+ Coinbase *common.Address `json:"coinbase"`
+ Alloc map[common.UnprefixedAddress]types.Account `json:"alloc" gencodec:"required"`
+ Number *math.HexOrDecimal64 `json:"number"`
+ GasUsed *math.HexOrDecimal64 `json:"gasUsed"`
+ ParentHash *common.Hash `json:"parentHash"`
+ BaseFee *math.HexOrDecimal256 `json:"baseFeePerGas"`
+ ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas"`
+ BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed"`
}
var dec Genesis
if err := json.Unmarshal(input, &dec); err != nil {
@@ -110,7 +111,7 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
if dec.Alloc == nil {
return errors.New("missing required field 'alloc' for Genesis")
}
- g.Alloc = make(GenesisAlloc, len(dec.Alloc))
+ g.Alloc = make(types.GenesisAlloc, len(dec.Alloc))
for k, v := range dec.Alloc {
g.Alloc[common.Address(k)] = v
}
diff --git a/core/genesis.go b/core/genesis.go
index 1070f470e6..d0ec4e0f77 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -18,7 +18,6 @@ package core
import (
"bytes"
- "encoding/hex"
"encoding/json"
"errors"
"fmt"
@@ -37,15 +36,21 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256"
)
//go:generate go run github.com/fjl/gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go
-//go:generate go run github.com/fjl/gencodec -type GenesisAccount -field-override genesisAccountMarshaling -out gen_genesis_account.go
var errGenesisNoConfig = errors.New("genesis has no chain configuration")
+// Deprecated: use types.GenesisAccount instead.
+type GenesisAccount = types.Account
+
+// Deprecated: use types.GenesisAlloc instead.
+type GenesisAlloc = types.GenesisAlloc
+
// Genesis specifies the header fields, state of a genesis block. It also defines hard
// fork switch-over blocks through the chain configuration.
type Genesis struct {
@@ -57,7 +62,7 @@ type Genesis struct {
Difficulty *big.Int `json:"difficulty" gencodec:"required"`
Mixhash common.Hash `json:"mixHash"`
Coinbase common.Address `json:"coinbase"`
- Alloc GenesisAlloc `json:"alloc" gencodec:"required"`
+ Alloc types.GenesisAlloc `json:"alloc" gencodec:"required"`
// These fields are used for consensus tests. Please don't use them
// in actual genesis blocks.
@@ -107,29 +112,14 @@ func ReadGenesis(db ethdb.Database) (*Genesis, error) {
return &genesis, nil
}
-// GenesisAlloc specifies the initial state that is part of the genesis block.
-type GenesisAlloc map[common.Address]GenesisAccount
-
-func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
- m := make(map[common.UnprefixedAddress]GenesisAccount)
- if err := json.Unmarshal(data, &m); err != nil {
- return err
- }
- *ga = make(GenesisAlloc)
- for addr, a := range m {
- (*ga)[common.Address(addr)] = a
- }
- return nil
-}
-
-// hash computes the state root according to the genesis specification.
-func (ga *GenesisAlloc) hash(isVerkle bool) (common.Hash, error) {
+// hashAlloc computes the state root according to the genesis specification.
+func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
// If a genesis-time verkle trie is requested, create a trie config
// with the verkle trie enabled so that the tree can be initialized
// as such.
- var config *trie.Config
+ var config *triedb.Config
if isVerkle {
- config = &trie.Config{
+ config = &triedb.Config{
PathDB: pathdb.Defaults,
IsVerkle: true,
}
@@ -156,13 +146,13 @@ func (ga *GenesisAlloc) hash(isVerkle bool) (common.Hash, error) {
return root, err
}
-// flush is very similar with hash, but the main difference is all the generated
+// flushAlloc is very similar with hash, but the main difference is all the generated
// states will be persisted into the given database. Also, the genesis state
// specification will be flushed as well.
-func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error {
- trieConfig := triedb.Config()
- if trieConfig != nil {
- trieConfig.NoTries = false
+func flushAlloc(ga *types.GenesisAlloc, db ethdb.Database, triedb *triedb.Database, blockhash common.Hash) error {
+ triedbConfig := triedb.Config()
+ if triedbConfig != nil {
+ triedbConfig.NoTries = false
}
statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
if err != nil {
@@ -198,15 +188,6 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas
return nil
}
-// GenesisAccount is an account in the state of the genesis block.
-type GenesisAccount struct {
- Code []byte `json:"code,omitempty"`
- Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
- Balance *big.Int `json:"balance" gencodec:"required"`
- Nonce uint64 `json:"nonce,omitempty"`
- PrivateKey []byte `json:"secretKey,omitempty"` // for tests
-}
-
// field type overrides for gencodec
type genesisSpecMarshaling struct {
Nonce math.HexOrDecimal64
@@ -216,40 +197,12 @@ type genesisSpecMarshaling struct {
GasUsed math.HexOrDecimal64
Number math.HexOrDecimal64
Difficulty *math.HexOrDecimal256
- Alloc map[common.UnprefixedAddress]GenesisAccount
+ Alloc map[common.UnprefixedAddress]types.Account
BaseFee *math.HexOrDecimal256
ExcessBlobGas *math.HexOrDecimal64
BlobGasUsed *math.HexOrDecimal64
}
-type genesisAccountMarshaling struct {
- Code hexutil.Bytes
- Balance *math.HexOrDecimal256
- Nonce math.HexOrDecimal64
- Storage map[storageJSON]storageJSON
- PrivateKey hexutil.Bytes
-}
-
-// storageJSON represents a 256 bit byte array, but allows less than 256 bits when
-// unmarshaling from hex.
-type storageJSON common.Hash
-
-func (h *storageJSON) UnmarshalText(text []byte) error {
- text = bytes.TrimPrefix(text, []byte("0x"))
- if len(text) > 64 {
- return fmt.Errorf("too many hex characters in storage key/value %q", text)
- }
- offset := len(h) - len(text)/2 // pad on the left
- if _, err := hex.Decode(h[offset:], text); err != nil {
- return fmt.Errorf("invalid hex storage key/value %q", text)
- }
- return nil
-}
-
-func (h storageJSON) MarshalText() ([]byte, error) {
- return hexutil.Bytes(h[:]).MarshalText()
-}
-
// GenesisMismatchError is raised when trying to overwrite an existing
// genesis block with an incompatible one.
type GenesisMismatchError struct {
@@ -283,11 +236,11 @@ type ChainOverrides struct {
// error is a *params.ConfigCompatError and the new, unwritten config is returned.
//
// The returned chain configuration is never nil.
-func SetupGenesisBlock(db ethdb.Database, triedb *trie.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlock(db ethdb.Database, triedb *triedb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlockWithOverride(db, triedb, genesis, nil)
}
-func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@@ -453,7 +406,7 @@ func (g *Genesis) IsVerkle() bool {
// ToBlock returns the genesis block according to genesis specification.
func (g *Genesis) ToBlock() *types.Block {
- root, err := g.Alloc.hash(g.IsVerkle())
+ root, err := hashAlloc(&g.Alloc, g.IsVerkle())
if err != nil {
panic(err)
}
@@ -499,7 +452,10 @@ func (g *Genesis) ToBlock() *types.Block {
// EIP-4788: The parentBeaconBlockRoot of the genesis block is always
// the zero hash. This is because the genesis block does not have a parent
// by definition.
- head.ParentBeaconRoot = new(common.Hash)
+ if conf.Parlia == nil {
+ head.ParentBeaconRoot = new(common.Hash)
+ }
+
// EIP-4844 fields
head.ExcessBlobGas = g.ExcessBlobGas
head.BlobGasUsed = g.BlobGasUsed
@@ -516,7 +472,7 @@ func (g *Genesis) ToBlock() *types.Block {
// Commit writes the block and state of a genesis specification to the database.
// The block is committed as the canonical head block.
-func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block, error) {
+func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Block, error) {
block := g.ToBlock()
if block.Number().Sign() != 0 {
return nil, errors.New("can't commit genesis block with number > 0")
@@ -531,10 +487,10 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block
if config.Clique != nil && len(block.Extra()) < 32+crypto.SignatureLength {
return nil, errors.New("can't start clique chain without signers")
}
- // All the checks has passed, flush the states derived from the genesis
+ // All the checks has passed, flushAlloc the states derived from the genesis
// specification as well as the specification itself into the provided
// database.
- if err := g.Alloc.flush(db, triedb, block.Hash()); err != nil {
+ if err := flushAlloc(&g.Alloc, db, triedb, block.Hash()); err != nil {
return nil, err
}
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
@@ -550,7 +506,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block
// MustCommit writes the genesis block and state to db, panicking on error.
// The block is committed as the canonical head block.
-func (g *Genesis) MustCommit(db ethdb.Database, triedb *trie.Database) *types.Block {
+func (g *Genesis) MustCommit(db ethdb.Database, triedb *triedb.Database) *types.Block {
block, err := g.Commit(db, triedb)
if err != nil {
panic(err)
@@ -613,7 +569,7 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
GasLimit: gasLimit,
BaseFee: big.NewInt(params.InitialBaseFee),
Difficulty: big.NewInt(1),
- Alloc: map[common.Address]GenesisAccount{
+ Alloc: map[common.Address]types.Account{
common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover
common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256
common.BytesToAddress([]byte{3}): {Balance: big.NewInt(1)}, // RIPEMD
@@ -626,12 +582,12 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
},
}
if faucet != nil {
- genesis.Alloc[*faucet] = GenesisAccount{Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))}
+ genesis.Alloc[*faucet] = types.Account{Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))}
}
return genesis
}
-func decodePrealloc(data string) GenesisAlloc {
+func decodePrealloc(data string) types.GenesisAlloc {
var p []struct {
Addr *big.Int
Balance *big.Int
@@ -647,9 +603,9 @@ func decodePrealloc(data string) GenesisAlloc {
if err := rlp.NewStream(strings.NewReader(data), 0).Decode(&p); err != nil {
panic(err)
}
- ga := make(GenesisAlloc, len(p))
+ ga := make(types.GenesisAlloc, len(p))
for _, account := range p {
- acc := GenesisAccount{Balance: account.Balance}
+ acc := types.Account{Balance: account.Balance}
if account.Misc != nil {
acc.Nonce = account.Misc.Nonce
acc.Code = account.Misc.Code
diff --git a/core/genesis_test.go b/core/genesis_test.go
index aeaa1dd979..4b280bcf13 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -27,11 +27,12 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
)
func TestSetupGenesis(t *testing.T) {
@@ -44,7 +45,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50")
customg = Genesis{
Config: &params.ChainConfig{HomesteadBlock: big.NewInt(3)},
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
},
}
@@ -62,7 +63,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
{
name: "genesis without ChainConfig",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), new(Genesis))
+ return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), new(Genesis))
},
wantErr: errGenesisNoConfig,
wantConfig: params.AllEthashProtocolChanges,
@@ -70,7 +71,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
{
name: "no block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil)
+ return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
},
wantHash: params.BSCGenesisHash,
wantConfig: params.BSCChainConfig,
@@ -78,8 +79,8 @@ func testSetupGenesis(t *testing.T, scheme string) {
{
name: "mainnet block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- DefaultGenesisBlock().MustCommit(db, trie.NewDatabase(db, newDbConfig(scheme)))
- return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil)
+ DefaultGenesisBlock().MustCommit(db, triedb.NewDatabase(db, newDbConfig(scheme)))
+ return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
},
wantHash: params.MainnetGenesisHash,
wantConfig: params.MainnetChainConfig,
@@ -87,7 +88,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
{
name: "custom block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- tdb := trie.NewDatabase(db, newDbConfig(scheme))
+ tdb := triedb.NewDatabase(db, newDbConfig(scheme))
customg.Commit(db, tdb)
return SetupGenesisBlock(db, tdb, nil)
},
@@ -97,7 +98,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
{
name: "custom block in DB, genesis == chapel",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- tdb := trie.NewDatabase(db, newDbConfig(scheme))
+ tdb := triedb.NewDatabase(db, newDbConfig(scheme))
customg.Commit(db, tdb)
return SetupGenesisBlock(db, tdb, DefaultChapelGenesisBlock())
},
@@ -108,7 +109,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
{
name: "compatible config in DB",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
- tdb := trie.NewDatabase(db, newDbConfig(scheme))
+ tdb := triedb.NewDatabase(db, newDbConfig(scheme))
oldcustomg.Commit(db, tdb)
return SetupGenesisBlock(db, tdb, &customg)
},
@@ -120,7 +121,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
// Commit the 'old' genesis block with Homestead transition at #2.
// Advance to block #4, past the homestead transition block of customg.
- tdb := trie.NewDatabase(db, newDbConfig(scheme))
+ tdb := triedb.NewDatabase(db, newDbConfig(scheme))
oldcustomg.Commit(db, tdb)
bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
@@ -177,7 +178,7 @@ func TestGenesisHashes(t *testing.T) {
} {
// Test via MustCommit
db := rawdb.NewMemoryDatabase()
- if have := c.genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)).Hash(); have != c.want {
+ if have := c.genesis.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults)).Hash(); have != c.want {
t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
}
// Test via ToBlock
@@ -195,7 +196,7 @@ func TestGenesis_Commit(t *testing.T) {
}
db := rawdb.NewMemoryDatabase()
- genesisBlock := genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
+ genesisBlock := genesis.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults))
if genesis.Difficulty != nil {
t.Fatalf("assumption wrong")
@@ -217,16 +218,16 @@ func TestGenesis_Commit(t *testing.T) {
func TestReadWriteGenesisAlloc(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
- alloc = &GenesisAlloc{
+ alloc = &types.GenesisAlloc{
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
{2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
}
- hash, _ = alloc.hash(false)
+ hash, _ = hashAlloc(alloc, false)
)
blob, _ := json.Marshal(alloc)
rawdb.WriteGenesisStateSpec(db, hash, blob)
- var reload GenesisAlloc
+ var reload types.GenesisAlloc
err := reload.UnmarshalJSON(rawdb.ReadGenesisStateSpec(db, hash))
if err != nil {
t.Fatalf("Failed to load genesis state %v", err)
@@ -270,11 +271,11 @@ func TestConfigOrDefault(t *testing.T) {
}
}
-func newDbConfig(scheme string) *trie.Config {
+func newDbConfig(scheme string) *triedb.Config {
if scheme == rawdb.HashScheme {
- return trie.HashDefaults
+ return triedb.HashDefaults
}
- return &trie.Config{PathDB: pathdb.Defaults}
+ return &triedb.Config{PathDB: pathdb.Defaults}
}
func TestVerkleGenesisCommit(t *testing.T) {
@@ -312,7 +313,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
Config: verkleConfig,
Timestamp: verkleTime,
Difficulty: big.NewInt(0),
- Alloc: GenesisAlloc{
+ Alloc: types.GenesisAlloc{
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
},
}
@@ -324,7 +325,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
}
db := rawdb.NewMemoryDatabase()
- triedb := trie.NewDatabase(db, &trie.Config{IsVerkle: true, PathDB: pathdb.Defaults})
+ triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, PathDB: pathdb.Defaults})
block := genesis.MustCommit(db, triedb)
if !bytes.Equal(block.Root().Bytes(), expected) {
t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got)
diff --git a/core/headerchain_test.go b/core/headerchain_test.go
index 2c0323e6f7..25d9bfffcb 100644
--- a/core/headerchain_test.go
+++ b/core/headerchain_test.go
@@ -28,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
func verifyUnbrokenCanonchain(hc *HeaderChain) error {
@@ -73,7 +73,7 @@ func TestHeaderInsertion(t *testing.T) {
db = rawdb.NewMemoryDatabase()
gspec = &Genesis{BaseFee: big.NewInt(params.InitialBaseFee), Config: params.AllEthashProtocolChanges}
)
- gspec.Commit(db, trie.NewDatabase(db, nil))
+ gspec.Commit(db, triedb.NewDatabase(db, nil))
hc, err := NewHeaderChain(db, gspec.Config, ethash.NewFaker(), func() bool { return false })
if err != nil {
t.Fatal(err)
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
index 215d63bebb..35ede96643 100644
--- a/core/rawdb/accessors_trie.go
+++ b/core/rawdb/accessors_trie.go
@@ -288,13 +288,13 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, has
// if the state is not present in database.
func ReadStateScheme(db ethdb.Reader) string {
// Check if state in path-based scheme is present
- blob, _ := ReadAccountTrieNode(db, nil)
+ blob, _ := ReadAccountTrieNode(db.StateStoreReader(), nil)
if len(blob) != 0 {
return PathScheme
}
// The root node might be deleted during the initial snap sync, check
// the persistent state id then.
- if id := ReadPersistentStateID(db); id != 0 {
+ if id := ReadPersistentStateID(db.StateStoreReader()); id != 0 {
return PathScheme
}
// In a hash-based scheme, the genesis state is consistently stored
@@ -304,7 +304,7 @@ func ReadStateScheme(db ethdb.Reader) string {
if header == nil {
return "" // empty datadir
}
- blob = ReadLegacyTrieNode(db, header.Root)
+ blob = ReadLegacyTrieNode(db.StateStoreReader(), header.Root)
if len(blob) == 0 {
return "" // no state in disk
}
diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go
index a73e586192..1d780063ff 100644
--- a/core/rawdb/ancient_utils.go
+++ b/core/rawdb/ancient_utils.go
@@ -91,7 +91,7 @@ func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) {
infos = append(infos, info)
case StateFreezerName:
- if ReadStateScheme(db) != PathScheme {
+ if ReadStateScheme(db) != PathScheme || db.StateStore() != nil {
continue
}
datadir, err := db.AncientDatadir()
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index e7e53947f6..80082830f9 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -41,7 +41,15 @@ type freezerdb struct {
ancientRoot string
ethdb.KeyValueStore
ethdb.AncientStore
- diffStore ethdb.KeyValueStore
+ diffStore ethdb.KeyValueStore
+ stateStore ethdb.Database
+}
+
+func (frdb *freezerdb) StateStoreReader() ethdb.Reader {
+ if frdb.stateStore == nil {
+ return frdb
+ }
+ return frdb.stateStore
}
// AncientDatadir returns the path of root ancient directory.
@@ -64,6 +72,11 @@ func (frdb *freezerdb) Close() error {
errs = append(errs, err)
}
}
+ if frdb.stateStore != nil {
+ if err := frdb.stateStore.Close(); err != nil {
+ errs = append(errs, err)
+ }
+ }
if len(errs) != 0 {
return fmt.Errorf("%v", errs)
}
@@ -81,6 +94,17 @@ func (frdb *freezerdb) SetDiffStore(diff ethdb.KeyValueStore) {
frdb.diffStore = diff
}
+func (frdb *freezerdb) StateStore() ethdb.Database {
+ return frdb.stateStore
+}
+
+func (frdb *freezerdb) SetStateStore(state ethdb.Database) {
+ if frdb.stateStore != nil {
+ frdb.stateStore.Close()
+ }
+ frdb.stateStore = state
+}
+
// Freeze is a helper method used for external testing to trigger and block until
// a freeze cycle completes, without having to sleep for a minute to trigger the
// automatic background run.
@@ -104,7 +128,8 @@ func (frdb *freezerdb) Freeze(threshold uint64) error {
// nofreezedb is a database wrapper that disables freezer data retrievals.
type nofreezedb struct {
ethdb.KeyValueStore
- diffStore ethdb.KeyValueStore
+ diffStore ethdb.KeyValueStore
+ stateStore ethdb.Database
}
// HasAncient returns an error as we don't have a backing chain freezer.
@@ -170,6 +195,21 @@ func (db *nofreezedb) SetDiffStore(diff ethdb.KeyValueStore) {
db.diffStore = diff
}
+func (db *nofreezedb) StateStore() ethdb.Database {
+ return db.stateStore
+}
+
+func (db *nofreezedb) SetStateStore(state ethdb.Database) {
+ db.stateStore = state
+}
+
+func (db *nofreezedb) StateStoreReader() ethdb.Reader {
+ if db.stateStore != nil {
+ return db.stateStore
+ }
+ return db
+}
+
func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
// Unlike other ancient-related methods, this method does not return
// errNotSupported when invoked.
@@ -609,6 +649,11 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
it := db.NewIterator(keyPrefix, keyStart)
defer it.Release()
+ var trieIter ethdb.Iterator
+ if db.StateStore() != nil {
+ trieIter = db.StateStore().NewIterator(keyPrefix, nil)
+ defer trieIter.Release()
+ }
var (
count int64
start = time.Now()
@@ -659,14 +704,14 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
bodies.Add(size)
case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
receipts.Add(size)
+ case IsLegacyTrieNode(key, it.Value()):
+ legacyTries.Add(size)
case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
tds.Add(size)
case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
numHashPairings.Add(size)
case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
hashNumPairings.Add(size)
- case IsLegacyTrieNode(key, it.Value()):
- legacyTries.Add(size)
case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
stateLookups.Add(size)
case IsAccountTrieNode(key):
@@ -728,6 +773,46 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
logged = time.Now()
}
}
+ // inspect separate trie db
+ if trieIter != nil {
+ count = 0
+ logged = time.Now()
+ for trieIter.Next() {
+ var (
+ key = trieIter.Key()
+ value = trieIter.Value()
+ size = common.StorageSize(len(key) + len(value))
+ )
+
+ switch {
+ case IsLegacyTrieNode(key, value):
+ legacyTries.Add(size)
+ case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
+ stateLookups.Add(size)
+ case IsAccountTrieNode(key):
+ accountTries.Add(size)
+ case IsStorageTrieNode(key):
+ storageTries.Add(size)
+ default:
+ var accounted bool
+ for _, meta := range [][]byte{
+ fastTrieProgressKey, persistentStateIDKey, trieJournalKey} {
+ if bytes.Equal(key, meta) {
+ metadata.Add(size)
+ accounted = true
+ }
+ }
+ if !accounted {
+ unaccounted.Add(size)
+ }
+ }
+ count++
+ if count%1000 == 0 && time.Since(logged) > 8*time.Second {
+ log.Info("Inspecting separate state database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
+ logged = time.Now()
+ }
+ }
+ }
// Display the database statistic of key-value store.
stats := [][]string{
{"Key-Value store", "Headers", headers.Size(), headers.Count()},
@@ -768,6 +853,28 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
}
total += ancient.size()
}
+
+ // inspect ancient state in separate trie db if exist
+ if trieIter != nil {
+ stateAncients, err := inspectFreezers(db.StateStore())
+ if err != nil {
+ return err
+ }
+ for _, ancient := range stateAncients {
+ for _, table := range ancient.sizes {
+ if ancient.name == "chain" {
+ break
+ }
+ stats = append(stats, []string{
+ fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
+ strings.Title(table.name),
+ table.size.String(),
+ fmt.Sprintf("%d", ancient.count()),
+ })
+ }
+ total += ancient.size()
+ }
+ }
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Database", "Category", "Size", "Items"})
table.SetFooter([]string{"", "Total", total.String(), " "})
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index daa594ec90..5bd0d54c66 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -1004,7 +1004,7 @@ func (t *freezerTable) ResetItemsOffset(virtualTail uint64) error {
}
if stat.Size() == 0 {
- return fmt.Errorf("Stat size is zero when ResetVirtualTail.")
+ return errors.New("stat size is zero when ResetVirtualTail")
}
var firstIndex indexEntry
diff --git a/core/rawdb/freezer_table_test.go b/core/rawdb/freezer_table_test.go
index d9a1aee595..49cdf0d89e 100644
--- a/core/rawdb/freezer_table_test.go
+++ b/core/rawdb/freezer_table_test.go
@@ -897,7 +897,7 @@ func getChunk(size int, b int) []byte {
}
// TODO (?)
-// - test that if we remove several head-files, aswell as data last data-file,
+// - test that if we remove several head-files, as well as data last data-file,
// the index is truncated accordingly
// Right now, the freezer would fail on these conditions:
// 1. have data files d0, d1, d2, d3
diff --git a/core/rawdb/table.go b/core/rawdb/table.go
index 509bdbc94e..dc49d526c3 100644
--- a/core/rawdb/table.go
+++ b/core/rawdb/table.go
@@ -213,6 +213,18 @@ func (t *table) SetDiffStore(diff ethdb.KeyValueStore) {
panic("not implement")
}
+func (t *table) StateStore() ethdb.Database {
+ return nil
+}
+
+func (t *table) SetStateStore(state ethdb.Database) {
+ panic("not implement")
+}
+
+func (t *table) StateStoreReader() ethdb.Reader {
+ return t
+}
+
// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
func (t *table) NewBatchWithSize(size int) ethdb.Batch {
return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
diff --git a/core/rlp_test.go b/core/rlp_test.go
index a2fb4937f8..bc37408537 100644
--- a/core/rlp_test.go
+++ b/core/rlp_test.go
@@ -41,7 +41,7 @@ func getBlock(transactions int, uncles int, dataSize int) *types.Block {
funds = big.NewInt(1_000_000_000_000_000_000)
gspec = &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{address: {Balance: funds}},
+ Alloc: types.GenesisAlloc{address: {Balance: funds}},
}
)
// We need to generate as many blocks +1 as uncles
diff --git a/core/state/database.go b/core/state/database.go
index 4b6086b3cb..cae5f46ac2 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/utils"
+ "github.com/ethereum/go-ethereum/triedb"
)
const (
@@ -67,7 +68,7 @@ type Database interface {
DiskDB() ethdb.KeyValueStore
// TrieDB returns the underlying trie database for managing trie nodes.
- TrieDB() *trie.Database
+ TrieDB() *triedb.Database
// NoTries returns whether the database has tries storage.
NoTries() bool
@@ -153,20 +154,20 @@ func NewDatabase(db ethdb.Database) Database {
// NewDatabaseWithConfig creates a backing store for state. The returned database
// is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a
// large memory cache.
-func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
+func NewDatabaseWithConfig(db ethdb.Database, config *triedb.Config) Database {
noTries := config != nil && config.NoTries
return &cachingDB{
disk: db,
codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
- triedb: trie.NewDatabase(db, config),
+ triedb: triedb.NewDatabase(db, config),
noTries: noTries,
}
}
// NewDatabaseWithNodeDB creates a state database with an already initialized node database.
-func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database {
+func NewDatabaseWithNodeDB(db ethdb.Database, triedb *triedb.Database) Database {
noTries := triedb != nil && triedb.Config() != nil && triedb.Config().NoTries
return &cachingDB{
@@ -182,7 +183,7 @@ type cachingDB struct {
disk ethdb.KeyValueStore
codeSizeCache *lru.Cache[common.Hash, int]
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
- triedb *trie.Database
+ triedb *triedb.Database
noTries bool
}
@@ -286,6 +287,6 @@ func (db *cachingDB) DiskDB() ethdb.KeyValueStore {
}
// TrieDB retrieves any intermediate trie-node caching layer.
-func (db *cachingDB) TrieDB() *trie.Database {
+func (db *cachingDB) TrieDB() *triedb.Database {
return db.triedb
}
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 3ba90f73de..8c28ad7011 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -42,6 +42,7 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
const (
@@ -102,7 +103,7 @@ func NewPruner(db ethdb.Database, config Config, triesInMemory uint64) (*Pruner,
return nil, errors.New("failed to load head block")
}
// Offline pruning is only supported in legacy hash based scheme.
- triedb := trie.NewDatabase(db, trie.HashDefaults)
+ triedb := triedb.NewDatabase(db, triedb.HashDefaults)
snapconfig := snapshot.Config{
CacheSize: 256,
@@ -158,13 +159,19 @@ func (p *Pruner) PruneAll(genesis *core.Genesis) error {
}
func pruneAll(maindb ethdb.Database, g *core.Genesis) error {
+ var pruneDB ethdb.Database
+ if maindb != nil && maindb.StateStore() != nil {
+ pruneDB = maindb.StateStore()
+ } else {
+ pruneDB = maindb
+ }
var (
count int
size common.StorageSize
pstart = time.Now()
logged = time.Now()
- batch = maindb.NewBatch()
- iter = maindb.NewIterator(nil, nil)
+ batch = pruneDB.NewBatch()
+ iter = pruneDB.NewIterator(nil, nil)
)
start := time.Now()
for iter.Next() {
@@ -194,7 +201,7 @@ func pruneAll(maindb ethdb.Database, g *core.Genesis) error {
batch.Reset()
iter.Release()
- iter = maindb.NewIterator(nil, key)
+ iter = pruneDB.NewIterator(nil, key)
}
}
}
@@ -218,7 +225,7 @@ func pruneAll(maindb ethdb.Database, g *core.Genesis) error {
end = nil
}
log.Info("Compacting database", "range", fmt.Sprintf("%#x-%#x", start, end), "elapsed", common.PrettyDuration(time.Since(cstart)))
- if err := maindb.Compact(start, end); err != nil {
+ if err := pruneDB.Compact(start, end); err != nil {
log.Error("Database compaction failed", "error", err)
return err
}
@@ -246,16 +253,22 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
// the trie nodes(and codes) belong to the active state will be filtered
// out. A very small part of stale tries will also be filtered because of
// the false-positive rate of bloom filter. But the assumption is held here
- // that the false-positive is low enough(~0.05%). The probablity of the
+ // that the false-positive is low enough(~0.05%). The probability of the
// dangling node is the state root is super low. So the dangling nodes in
// theory will never ever be visited again.
+ var pruneDB ethdb.Database
+ if maindb != nil && maindb.StateStore() != nil {
+ pruneDB = maindb.StateStore()
+ } else {
+ pruneDB = maindb
+ }
var (
skipped, count int
size common.StorageSize
pstart = time.Now()
logged = time.Now()
- batch = maindb.NewBatch()
- iter = maindb.NewIterator(nil, nil)
+ batch = pruneDB.NewBatch()
+ iter = pruneDB.NewIterator(nil, nil)
)
for iter.Next() {
key := iter.Key()
@@ -302,7 +315,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
batch.Reset()
iter.Release()
- iter = maindb.NewIterator(nil, key)
+ iter = pruneDB.NewIterator(nil, key)
}
}
}
@@ -347,7 +360,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
end = nil
}
log.Info("Compacting database", "range", fmt.Sprintf("%#x-%#x", start, end), "elapsed", common.PrettyDuration(time.Since(cstart)))
- if err := maindb.Compact(start, end); err != nil {
+ if err := pruneDB.Compact(start, end); err != nil {
log.Error("Database compaction failed", "error", err)
return err
}
@@ -585,10 +598,17 @@ func (p *Pruner) Prune(root common.Hash) error {
// Use the bottom-most diff layer as the target
root = layers[len(layers)-1].Root()
}
+ // if the separated state db has been set, use this db to prune data
+ var trienodedb ethdb.Database
+ if p.db != nil && p.db.StateStore() != nil {
+ trienodedb = p.db.StateStore()
+ } else {
+ trienodedb = p.db
+ }
// Ensure the root is really present. The weak assumption
// is the presence of root can indicate the presence of the
// entire trie.
- if !rawdb.HasLegacyTrieNode(p.db, root) {
+ if !rawdb.HasLegacyTrieNode(trienodedb, root) {
// The special case is for clique based networks(goerli
// and some other private networks), it's possible that two
// consecutive blocks will have same root. In this case snapshot
@@ -602,7 +622,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// as the pruning target.
var found bool
for i := len(layers) - 2; i >= 2; i-- {
- if rawdb.HasLegacyTrieNode(p.db, layers[i].Root()) {
+ if rawdb.HasLegacyTrieNode(trienodedb, layers[i].Root()) {
root = layers[i].Root()
found = true
log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)
@@ -610,7 +630,7 @@ func (p *Pruner) Prune(root common.Hash) error {
}
}
if !found {
- if blob := rawdb.ReadLegacyTrieNode(p.db, p.snaptree.DiskRoot()); len(blob) != 0 {
+ if blob := rawdb.ReadLegacyTrieNode(trienodedb, p.snaptree.DiskRoot()); len(blob) != 0 {
root = p.snaptree.DiskRoot()
found = true
log.Info("Selecting disk-layer as the pruning target", "root", root)
@@ -693,7 +713,7 @@ func RecoverPruning(datadir string, db ethdb.Database, triesInMemory uint64) err
AsyncBuild: false,
}
// Offline pruning is only supported in legacy hash based scheme.
- triedb := trie.NewDatabase(db, trie.HashDefaults)
+ triedb := triedb.NewDatabase(db, triedb.HashDefaults)
snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Root(), int(triesInMemory), false)
if err != nil {
return err // The relevant snapshot(s) might not exist
@@ -736,7 +756,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
if genesis == nil {
return errors.New("missing genesis block")
}
- t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db, trie.HashDefaults))
+ t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), triedb.NewDatabase(db, triedb.HashDefaults))
if err != nil {
return err
}
@@ -760,7 +780,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error {
}
if acc.Root != types.EmptyRootHash {
id := trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root)
- storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db, trie.HashDefaults))
+ storageTrie, err := trie.NewStateTrie(id, triedb.NewDatabase(db, triedb.HashDefaults))
if err != nil {
return err
}
diff --git a/core/state/shared_pool.go b/core/state/shared_pool.go
index ba96c2c27d..94f3cb3253 100644
--- a/core/state/shared_pool.go
+++ b/core/state/shared_pool.go
@@ -6,7 +6,7 @@ import (
"github.com/ethereum/go-ethereum/common"
)
-// sharedPool is used to store maps of originStorage of stateObjects
+// StoragePool is used to store maps of originStorage of stateObjects
type StoragePool struct {
sync.RWMutex
sharedMap map[common.Address]*sync.Map
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index fb01d9b4a5..eb9fa2ed13 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -43,7 +43,7 @@ var (
aggregatorMemoryLimit = uint64(4 * 1024 * 1024)
// aggregatorItemLimit is an approximate number of items that will end up
- // in the agregator layer before it's flushed out to disk. A plain account
+ // in the aggregator layer before it's flushed out to disk. A plain account
// weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
// 0B (+hash). Slots are mostly set/unset in lockstep, so that average at
// 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
index 646d84fc32..58ce3e3657 100644
--- a/core/state/snapshot/disklayer.go
+++ b/core/state/snapshot/disklayer.go
@@ -26,13 +26,13 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
// diskLayer is a low level persistent snapshot built on top of a key-value store.
type diskLayer struct {
diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
- triedb *trie.Database // Trie node cache for reconstruction purposes
+ triedb *triedb.Database // Trie node cache for reconstruction purposes
cache *fastcache.Cache // Cache to avoid hitting the disk for direct access
root common.Hash // Root hash of the base snapshot
diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go
index 8ec34b22fe..f524253875 100644
--- a/core/state/snapshot/disklayer_test.go
+++ b/core/state/snapshot/disklayer_test.go
@@ -139,7 +139,7 @@ func TestDiskMerge(t *testing.T) {
// Retrieve all the data through the disk layer and validate it
base = snaps.Snapshot(diffRoot)
if _, ok := base.(*diskLayer); !ok {
- t.Fatalf("update not flattend into the disk layer")
+ t.Fatalf("update not flattened into the disk layer")
}
// assertAccount ensures that an account matches the given blob.
@@ -362,7 +362,7 @@ func TestDiskPartialMerge(t *testing.T) {
// Retrieve all the data through the disk layer and validate it
base = snaps.Snapshot(diffRoot)
if _, ok := base.(*diskLayer); !ok {
- t.Fatalf("test %d: update not flattend into the disk layer", i)
+ t.Fatalf("test %d: update not flattened into the disk layer", i)
}
assertAccount(accNoModNoCache, accNoModNoCache[:])
assertAccount(accNoModCache, accNoModCache[:])
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index db2e1ad9fe..5b20d9e275 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/triedb"
)
var (
@@ -55,7 +56,7 @@ var (
// generateSnapshot regenerates a brand new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done.
-func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *diskLayer {
+func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *triedb.Database, cache int, root common.Hash) *diskLayer {
// Create a new disk layer with an initialized state marker at zero
var (
stats = &generatorStats{start: time.Now()}
@@ -353,7 +354,7 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi
var resolver trie.NodeResolver
if len(result.keys) > 0 {
mdb := rawdb.NewMemoryDatabase()
- tdb := trie.NewDatabase(mdb, trie.HashDefaults)
+ tdb := triedb.NewDatabase(mdb, triedb.HashDefaults)
defer tdb.Close()
snapTrie := trie.NewEmpty(tdb)
for i, key := range result.keys {
diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go
index 7d941f6285..da93ebc875 100644
--- a/core/state/snapshot/generate_test.go
+++ b/core/state/snapshot/generate_test.go
@@ -29,9 +29,10 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)
@@ -155,20 +156,20 @@ func checkSnapRoot(t *testing.T, snap *diskLayer, trieRoot common.Hash) {
type testHelper struct {
diskdb ethdb.Database
- triedb *trie.Database
+ triedb *triedb.Database
accTrie *trie.StateTrie
nodes *trienode.MergedNodeSet
}
func newHelper(scheme string) *testHelper {
diskdb := rawdb.NewMemoryDatabase()
- config := &trie.Config{}
+ config := &triedb.Config{}
if scheme == rawdb.PathScheme {
config.PathDB = &pathdb.Config{} // disable caching
} else {
config.HashDB = &hashdb.Config{} // disable caching
}
- triedb := trie.NewDatabase(diskdb, config)
+ triedb := triedb.NewDatabase(diskdb, config)
accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb)
return &testHelper{
diskdb: diskdb,
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index 9df58aee09..cc60f79ce7 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
const journalVersion uint64 = 0
@@ -120,7 +120,7 @@ func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, jou
}
// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
-func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash, cache int, recovery bool, noBuild bool, withoutTrie bool) (snapshot, bool, error) {
+func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *triedb.Database, root common.Hash, cache int, recovery bool, noBuild bool, withoutTrie bool) (snapshot, bool, error) {
// If snapshotting is disabled (initial sync in progress), don't do anything,
// wait for the chain to permit us to do something meaningful
if rawdb.ReadSnapshotDisabled(diskdb) {
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index 99ef77a5c8..117504f8b6 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
var (
@@ -180,7 +180,7 @@ type Config struct {
type Tree struct {
config Config // Snapshots configurations
diskdb ethdb.KeyValueStore // Persistent database to store the snapshot
- triedb *trie.Database // In-memory cache to access the trie through
+ triedb *triedb.Database // In-memory cache to access the trie through
layers map[common.Hash]snapshot // Collection of all known layers
lock sync.RWMutex
capLimit int
@@ -205,7 +205,7 @@ type Tree struct {
// state trie.
// - otherwise, the entire snapshot is considered invalid and will be recreated on
// a background thread.
-func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root common.Hash, cap int, withoutTrie bool) (*Tree, error) {
+func New(config Config, diskdb ethdb.KeyValueStore, triedb *triedb.Database, root common.Hash, cap int, withoutTrie bool) (*Tree, error) {
snap := &Tree{
config: config,
diskdb: diskdb,
@@ -277,6 +277,14 @@ func (t *Tree) Disable() {
for _, layer := range t.layers {
switch layer := layer.(type) {
case *diskLayer:
+
+ layer.lock.RLock()
+ generating := layer.genMarker != nil
+ layer.lock.RUnlock()
+ if !generating {
+ // Generator is already aborted or finished
+ break
+ }
// If the base layer is generating, abort it
if layer.genAbort != nil {
abort := make(chan *generatorStats)
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 524bdc47cd..3f2c8e9786 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -95,7 +95,7 @@ type stateObject struct {
// empty returns whether the account is considered empty.
func (s *stateObject) empty() bool {
- return s.data.Nonce == 0 && s.data.Balance.Sign() == 0 && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes())
+ return s.data.Nonce == 0 && s.data.Balance.IsZero() && bytes.Equal(s.data.CodeHash, types.EmptyCodeHash.Bytes())
}
// newObject creates a state object.
@@ -480,7 +480,7 @@ func (s *stateObject) commit() (*trienode.NodeSet, error) {
func (s *stateObject) AddBalance(amount *uint256.Int) {
// EIP161: We must check emptiness for the objects such that the account
// clearing (0,0,0 objects) can take effect.
- if amount.Sign() == 0 {
+ if amount.IsZero() {
if s.empty() {
s.touch()
}
@@ -492,7 +492,7 @@ func (s *stateObject) AddBalance(amount *uint256.Int) {
// SubBalance removes amount from s's balance.
// It is used to remove funds from the origin account of a transfer.
func (s *stateObject) SubBalance(amount *uint256.Int) {
- if amount.Sign() == 0 {
+ if amount.IsZero() {
return
}
s.SetBalance(new(uint256.Int).Sub(s.Balance(), amount))
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 3534938fd4..c19f2c9138 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
"github.com/holiman/uint256"
)
@@ -43,7 +43,7 @@ func newStateEnv() *stateEnv {
func TestDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
- tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+ tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
sdb, _ := New(types.EmptyRootHash, tdb, nil)
s := &stateEnv{db: db, state: sdb}
@@ -102,7 +102,7 @@ func TestDump(t *testing.T) {
func TestIterativeDump(t *testing.T) {
db := rawdb.NewMemoryDatabase()
- tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true})
+ tdb := NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
sdb, _ := New(types.EmptyRootHash, tdb, nil)
s := &stateEnv{db: db, state: sdb}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index cb024661bd..038bc7899f 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -18,6 +18,7 @@
package state
import (
+ "errors"
"fmt"
"runtime"
"sort"
@@ -747,7 +748,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
if s.trie == nil {
tr, err := s.db.OpenTrie(s.originalRoot)
if err != nil {
- s.setError(fmt.Errorf("failed to open trie tree"))
+ s.setError(errors.New("failed to open trie tree"))
return nil
}
s.trie = tr
@@ -1003,7 +1004,7 @@ func (s *StateDB) WaitPipeVerification() error {
// Need to wait for the parent trie to commit
if s.snap != nil {
if valid := s.snap.WaitAndGetVerifyRes(); !valid {
- return fmt.Errorf("verification on parent snap failed")
+ return errors.New("verification on parent snap failed")
}
}
return nil
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index 856d3394f6..06a1aed153 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -35,8 +35,9 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/triestate"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256"
)
@@ -181,7 +182,7 @@ func (test *stateTest) run() bool {
storageList = append(storageList, copy2DSet(states.Storages))
}
disk = rawdb.NewMemoryDatabase()
- tdb = trie.NewDatabase(disk, &trie.Config{PathDB: pathdb.Defaults})
+ tdb = triedb.NewDatabase(disk, &triedb.Config{PathDB: pathdb.Defaults})
sdb = NewDatabaseWithNodeDB(disk, tdb)
byzantium = rand.Intn(2) == 0
)
@@ -253,7 +254,7 @@ func (test *stateTest) run() bool {
// - the account was indeed not present in trie
// - the account is present in new trie, nil->nil is regarded as invalid
// - the slots transition is correct
-func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
+func (test *stateTest) verifyAccountCreation(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error {
// Verify account change
addrHash := crypto.Keccak256Hash(addr.Bytes())
oBlob, err := otr.Get(addrHash.Bytes())
@@ -304,7 +305,7 @@ func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database
// - the account was indeed present in trie
// - the account in old trie matches the provided value
// - the slots transition is correct
-func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
+func (test *stateTest) verifyAccountUpdate(next common.Hash, db *triedb.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error {
// Verify account change
addrHash := crypto.Keccak256Hash(addr.Bytes())
oBlob, err := otr.Get(addrHash.Bytes())
@@ -358,7 +359,7 @@ func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database,
return nil
}
-func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
+func (test *stateTest) verify(root common.Hash, next common.Hash, db *triedb.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error {
otr, err := trie.New(trie.StateTrieID(root), db)
if err != nil {
return err
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 22e7d3aa43..aab219561d 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -35,9 +35,10 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256"
)
@@ -47,7 +48,7 @@ func TestUpdateLeaks(t *testing.T) {
// Create an empty state database
var (
db = rawdb.NewMemoryDatabase()
- tdb = trie.NewDatabase(db, nil)
+ tdb = triedb.NewDatabase(db, nil)
)
state, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(db, tdb), nil)
@@ -83,8 +84,8 @@ func TestIntermediateLeaks(t *testing.T) {
// Create two state databases, one transitioning to the final state, the other final from the beginning
transDb := rawdb.NewMemoryDatabase()
finalDb := rawdb.NewMemoryDatabase()
- transNdb := trie.NewDatabase(transDb, nil)
- finalNdb := trie.NewDatabase(finalDb, nil)
+ transNdb := triedb.NewDatabase(transDb, nil)
+ finalNdb := triedb.NewDatabase(finalDb, nil)
transState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(transDb, transNdb), nil)
finalState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(finalDb, finalNdb), nil)
@@ -808,20 +809,20 @@ func TestMissingTrieNodes(t *testing.T) {
func testMissingTrieNodes(t *testing.T, scheme string) {
// Create an initial state with a few accounts
var (
- triedb *trie.Database
- memDb = rawdb.NewMemoryDatabase()
+ tdb *triedb.Database
+ memDb = rawdb.NewMemoryDatabase()
)
if scheme == rawdb.PathScheme {
- triedb = trie.NewDatabase(memDb, &trie.Config{PathDB: &pathdb.Config{
+ tdb = triedb.NewDatabase(memDb, &triedb.Config{PathDB: &pathdb.Config{
CleanCacheSize: 0,
DirtyCacheSize: 0,
}}) // disable caching
} else {
- triedb = trie.NewDatabase(memDb, &trie.Config{HashDB: &hashdb.Config{
+ tdb = triedb.NewDatabase(memDb, &triedb.Config{HashDB: &hashdb.Config{
CleanCacheSize: 0,
}}) // disable caching
}
- db := NewDatabaseWithNodeDB(memDb, triedb)
+ db := NewDatabaseWithNodeDB(memDb, tdb)
var root common.Hash
state, _ := New(types.EmptyRootHash, db, nil)
@@ -837,7 +838,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
root, _, _ = state.Commit(0, nil)
t.Logf("root: %x", root)
// force-flush
- triedb.Commit(root, false)
+ tdb.Commit(root, false)
}
// Create a new state on the old root
state, _ = New(root, db, nil)
@@ -1046,7 +1047,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
// Create a state trie with many accounts and slots
var (
memdb = rawdb.NewMemoryDatabase()
- triedb = trie.NewDatabase(memdb, nil)
+ triedb = triedb.NewDatabase(memdb, nil)
statedb = NewDatabaseWithNodeDB(memdb, triedb)
state, _ = New(types.EmptyRootHash, statedb, nil)
)
@@ -1119,7 +1120,7 @@ func TestStateDBTransientStorage(t *testing.T) {
func TestResetObject(t *testing.T) {
var (
disk = rawdb.NewMemoryDatabase()
- tdb = trie.NewDatabase(disk, nil)
+ tdb = triedb.NewDatabase(disk, nil)
db = NewDatabaseWithNodeDB(disk, tdb)
snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash, 128, false)
state, _ = New(types.EmptyRootHash, db, snaps)
@@ -1155,7 +1156,7 @@ func TestResetObject(t *testing.T) {
func TestDeleteStorage(t *testing.T) {
var (
disk = rawdb.NewMemoryDatabase()
- tdb = trie.NewDatabase(disk, nil)
+ tdb = triedb.NewDatabase(disk, nil)
db = NewDatabaseWithNodeDB(disk, tdb)
snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash, 128, false)
state, _ = New(types.EmptyRootHash, db, snaps)
diff --git a/core/state/sync.go b/core/state/sync.go
index d6775e8896..411b54eab0 100644
--- a/core/state/sync.go
+++ b/core/state/sync.go
@@ -24,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/trie"
)
-// NewStateSync create a new state trie download scheduler.
+// NewStateSync creates a new state trie download scheduler.
func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(keys [][]byte, leaf []byte) error, scheme string) *trie.Sync {
// Register the storage slot callback if the external callback is specified.
var onSlot func(keys [][]byte, path []byte, leaf []byte, parent common.Hash, parentPath []byte) error
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index 70cd8fcb6d..adb3ff9495 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -27,8 +27,9 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256"
)
@@ -41,16 +42,16 @@ type testAccount struct {
}
// makeTestState create a sample test state to test node-wise reconstruction.
-func makeTestState(scheme string) (ethdb.Database, Database, *trie.Database, common.Hash, []*testAccount) {
+func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, common.Hash, []*testAccount) {
// Create an empty state
- config := &trie.Config{Preimages: true}
+ config := &triedb.Config{Preimages: true}
if scheme == rawdb.PathScheme {
config.PathDB = pathdb.Defaults
} else {
config.HashDB = hashdb.Defaults
}
db := rawdb.NewMemoryDatabase()
- nodeDb := trie.NewDatabase(db, config)
+ nodeDb := triedb.NewDatabase(db, config)
sdb := NewDatabaseWithNodeDB(db, nodeDb)
state, _ := New(types.EmptyRootHash, sdb, nil)
@@ -89,7 +90,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *trie.Database, com
// checkStateAccounts cross references a reconstructed state with an expected
// account array.
func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root common.Hash, accounts []*testAccount) {
- var config trie.Config
+ var config triedb.Config
if scheme == rawdb.PathScheme {
config.PathDB = pathdb.Defaults
}
@@ -116,7 +117,7 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root com
// checkStateConsistency checks that all data of a state root is present.
func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) error {
- config := &trie.Config{Preimages: true}
+ config := &triedb.Config{Preimages: true}
if scheme == rawdb.PathScheme {
config.PathDB = pathdb.Defaults
}
@@ -132,8 +133,8 @@ func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) e
// Tests that an empty state is not scheduled for syncing.
func TestEmptyStateSync(t *testing.T) {
- dbA := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)
- dbB := trie.NewDatabase(rawdb.NewMemoryDatabase(), &trie.Config{PathDB: pathdb.Defaults})
+ dbA := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ dbB := triedb.NewDatabase(rawdb.NewMemoryDatabase(), &triedb.Config{PathDB: pathdb.Defaults})
sync := NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, dbA.Scheme())
if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
@@ -239,7 +240,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool, s
id := trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root)
stTrie, err := trie.New(id, ndb)
if err != nil {
- t.Fatalf("failed to retriev storage trie for path %x: %v", node.syncPath[1], err)
+ t.Fatalf("failed to retrieve storage trie for path %x: %v", node.syncPath[1], err)
}
data, _, err := stTrie.GetNode(node.syncPath[1])
if err != nil {
diff --git a/core/state_prefetcher_test.go b/core/state_prefetcher_test.go
index b7224a0b36..b1c5974151 100644
--- a/core/state_prefetcher_test.go
+++ b/core/state_prefetcher_test.go
@@ -19,7 +19,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
"github.com/google/pprof/profile"
)
@@ -37,7 +37,7 @@ func TestPrefetchLeaking(t *testing.T) {
Alloc: GenesisAlloc{address: {Balance: funds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- triedb = trie.NewDatabase(gendb, nil)
+ triedb = triedb.NewDatabase(gendb, nil)
genesis = gspec.MustCommit(gendb, triedb)
signer = types.LatestSigner(gspec.Config)
)
@@ -80,7 +80,7 @@ func CheckNoGoroutines(key, value string) error {
var pb bytes.Buffer
profiler := pprof.Lookup("goroutine")
if profiler == nil {
- return fmt.Errorf("unable to find profile")
+ return errors.New("unable to find profile")
}
err := profiler.WriteTo(&pb, 0)
if err != nil {
diff --git a/core/state_processor.go b/core/state_processor.go
index f7d8f2c08b..da66e1747e 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -76,7 +76,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
lastBlock := p.bc.GetBlockByHash(block.ParentHash())
if lastBlock == nil {
- return statedb, nil, nil, 0, fmt.Errorf("could not get parent block")
+ return statedb, nil, nil, 0, errors.New("could not get parent block")
}
if !p.config.IsFeynman(block.Number(), block.Time()) {
// Handle upgrade build-in system contract code
diff --git a/core/state_processor_test.go b/core/state_processor_test.go
index dd4919c950..f87997c7ed 100644
--- a/core/state_processor_test.go
+++ b/core/state_processor_test.go
@@ -122,12 +122,12 @@ func TestStateProcessorErrors(t *testing.T) {
db = rawdb.NewMemoryDatabase()
gspec = &Genesis{
Config: config,
- Alloc: GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
+ Alloc: types.GenesisAlloc{
+ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
Balance: big.NewInt(1000000000000000000), // 1 ether
Nonce: 0,
},
- common.HexToAddress("0xfd0810DD14796680f72adf1a371963d0745BCc64"): GenesisAccount{
+ common.HexToAddress("0xfd0810DD14796680f72adf1a371963d0745BCc64"): types.Account{
Balance: big.NewInt(1000000000000000000), // 1 ether
Nonce: math.MaxUint64,
},
@@ -286,8 +286,8 @@ func TestStateProcessorErrors(t *testing.T) {
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
},
- Alloc: GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
+ Alloc: types.GenesisAlloc{
+ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
Balance: big.NewInt(1000000000000000000), // 1 ether
Nonce: 0,
},
@@ -324,8 +324,8 @@ func TestStateProcessorErrors(t *testing.T) {
db = rawdb.NewMemoryDatabase()
gspec = &Genesis{
Config: config,
- Alloc: GenesisAlloc{
- common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
+ Alloc: types.GenesisAlloc{
+ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{
Balance: big.NewInt(1000000000000000000), // 1 ether
Nonce: 0,
Code: common.FromHex("0xB0B0FACE"),
@@ -419,7 +419,9 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
header.BlobGasUsed = &used
beaconRoot := common.HexToHash("0xbeac00")
- header.ParentBeaconRoot = &beaconRoot
+ if config.Parlia == nil {
+ header.ParentBeaconRoot = &beaconRoot
+ }
}
// Assemble and return the final block for sealing
if config.IsShanghai(header.Number, header.Time) {
diff --git a/core/state_transition.go b/core/state_transition.go
index 3b165f9ee8..e5323005e3 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -17,6 +17,7 @@
package core
import (
+ "errors"
"fmt"
"math"
"math/big"
@@ -68,7 +69,7 @@ func (result *ExecutionResult) Revert() []byte {
}
// IntrinsicGas computes the 'intrinsic gas' for a message with the given data.
-func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028 bool, isEIP3860 bool) (uint64, error) {
+func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028, isEIP3860 bool) (uint64, error) {
// Set the starting gas for the raw transaction
var gas uint64
if isContractCreation && isHomestead {
@@ -397,10 +398,10 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
if st.evm.ChainConfig().IsNano(st.evm.Context.BlockNumber) {
for _, blackListAddr := range types.NanoBlackList {
if blackListAddr == msg.From {
- return nil, fmt.Errorf("block blacklist account")
+ return nil, errors.New("block blacklist account")
}
if msg.To != nil && *msg.To == blackListAddr {
- return nil, fmt.Errorf("block blacklist account")
+ return nil, errors.New("block blacklist account")
}
}
}
diff --git a/core/txindexer.go b/core/txindexer.go
index 61de41947c..70fe5f3322 100644
--- a/core/txindexer.go
+++ b/core/txindexer.go
@@ -127,9 +127,10 @@ func (indexer *txIndexer) loop(chain *BlockChain) {
// Listening to chain events and manipulate the transaction indexes.
var (
- stop chan struct{} // Non-nil if background routine is active.
- done chan struct{} // Non-nil if background routine is active.
- lastHead uint64 // The latest announced chain head (whose tx indexes are assumed created)
+ stop chan struct{} // Non-nil if background routine is active.
+ done chan struct{} // Non-nil if background routine is active.
+ lastHead uint64 // The latest announced chain head (whose tx indexes are assumed created)
+ lastTail = rawdb.ReadTxIndexTail(indexer.db) // The oldest indexed block, nil means nothing indexed
headCh = make(chan ChainHeadEvent)
sub = chain.SubscribeChainHeadEvent(headCh)
@@ -156,8 +157,9 @@ func (indexer *txIndexer) loop(chain *BlockChain) {
case <-done:
stop = nil
done = nil
+ lastTail = rawdb.ReadTxIndexTail(indexer.db)
case ch := <-indexer.progress:
- ch <- indexer.report(lastHead)
+ ch <- indexer.report(lastHead, lastTail)
case ch := <-indexer.term:
if stop != nil {
close(stop)
@@ -173,11 +175,7 @@ func (indexer *txIndexer) loop(chain *BlockChain) {
}
// report returns the tx indexing progress.
-func (indexer *txIndexer) report(head uint64) TxIndexProgress {
- var (
- remaining uint64
- tail = rawdb.ReadTxIndexTail(indexer.db)
- )
+func (indexer *txIndexer) report(head uint64, tail *uint64) TxIndexProgress {
total := indexer.limit
if indexer.limit == 0 || total > head {
total = head + 1 // genesis included
@@ -188,6 +186,7 @@ func (indexer *txIndexer) report(head uint64) TxIndexProgress {
}
// The value of indexed might be larger than total if some blocks need
// to be unindexed, avoiding a negative remaining.
+ var remaining uint64
if indexed < total {
remaining = total - indexed
}
diff --git a/core/txindexer_test.go b/core/txindexer_test.go
index b18ebe6cbe..b59d2a841a 100644
--- a/core/txindexer_test.go
+++ b/core/txindexer_test.go
@@ -39,7 +39,7 @@ func TestTxIndexer(t *testing.T) {
gspec = &Genesis{
Config: params.TestChainConfig,
- Alloc: GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
+ Alloc: types.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
engine = ethash.NewFaker()
@@ -85,7 +85,7 @@ func TestTxIndexer(t *testing.T) {
for number := *tail; number <= chainHead; number += 1 {
verifyIndexes(db, number, true)
}
- progress := indexer.report(chainHead)
+ progress := indexer.report(chainHead, tail)
if !progress.Done() {
t.Fatalf("Expect fully indexed")
}
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
index c9f72b5ea8..a0504123f5 100644
--- a/core/txpool/blobpool/blobpool.go
+++ b/core/txpool/blobpool/blobpool.go
@@ -268,7 +268,7 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
// going up, crossing the smaller positive jump counter). As such, the pool
// cares only about the min of the two delta values for eviction priority.
//
-// priority = min(delta-basefee, delta-blobfee)
+// priority = min(deltaBasefee, deltaBlobfee)
//
// - The above very aggressive dimensionality and noise reduction should result
// in transaction being grouped into a small number of buckets, the further
@@ -280,7 +280,7 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
// with high fee caps since it could enable pool wars. As such, any positive
// priority will be grouped together.
//
-// priority = min(delta-basefee, delta-blobfee, 0)
+// priority = min(deltaBasefee, deltaBlobfee, 0)
//
// Optimisation tradeoffs:
//
@@ -344,7 +344,7 @@ func (p *BlobPool) Filter(tx *types.Transaction) bool {
// Init sets the gas price needed to keep a transaction in the pool and the chain
// head to allow balance / nonce checks. The transaction journal will be loaded
// from disk and filtered based on the provided starting settings.
-func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error {
+func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error {
p.reserve = reserve
var (
@@ -362,7 +362,7 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
}
}
// Initialize the state with head block, or fallback to empty one in
- // case the head state is not available(might occur when node is not
+ // case the head state is not available (might occur when node is not
// fully synced).
state, err := p.chain.StateAt(head.Root)
if err != nil {
@@ -373,14 +373,14 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
}
p.head, p.state = head, state
- // Index all transactions on disk and delete anything inprocessable
+ // Index all transactions on disk and delete anything unprocessable
var fails []uint64
index := func(id uint64, size uint32, blob []byte) {
if p.parseTransaction(id, size, blob) != nil {
fails = append(fails, id)
}
}
- store, err := billy.Open(billy.Options{Path: queuedir}, newSlotter(), index)
+ store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, newSlotter(), index)
if err != nil {
return err
}
@@ -388,6 +388,8 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
if len(fails) > 0 {
log.Warn("Dropping invalidated blob transactions", "ids", fails)
+ dropInvalidMeter.Mark(int64(len(fails)))
+
for _, id := range fails {
if err := p.store.Delete(id); err != nil {
p.Close()
@@ -402,7 +404,7 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
}
var (
basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
- blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice))
+ blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
)
if p.head.ExcessBlobGas != nil {
blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*p.head.ExcessBlobGas))
@@ -420,7 +422,7 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
basefeeGauge.Update(int64(basefee.Uint64()))
blobfeeGauge.Update(int64(blobfee.Uint64()))
- p.SetGasTip(gasTip)
+ p.SetGasTip(new(big.Int).SetUint64(gasTip))
// Since the user might have modified their pool's capacity, evict anything
// above the current allowance
@@ -436,8 +438,10 @@ func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.Addr
// Close closes down the underlying persistent store.
func (p *BlobPool) Close() error {
var errs []error
- if err := p.limbo.Close(); err != nil {
- errs = append(errs, err)
+ if p.limbo != nil { // Close might be invoked due to error in constructor, before p.limbo is set
+ if err := p.limbo.Close(); err != nil {
+ errs = append(errs, err)
+ }
}
if err := p.store.Close(); err != nil {
errs = append(errs, err)
@@ -458,7 +462,7 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
tx := new(types.Transaction)
if err := rlp.DecodeBytes(blob, tx); err != nil {
// This path is impossible unless the disk data representation changes
- // across restarts. For that ever unprobable case, recover gracefully
+ // across restarts. For that ever improbable case, recover gracefully
// by ignoring this data entry.
log.Error("Failed to decode blob pool entry", "id", id, "err", err)
return err
@@ -469,11 +473,17 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
}
meta := newBlobTxMeta(id, size, tx)
-
+ if _, exists := p.lookup[meta.hash]; exists {
+ // This path is only possible after a crash, where deleted items are not
+ // removed via the normal shutdown-startup procedure and thus may get
+ // partially resurrected.
+ log.Error("Rejecting duplicate blob pool entry", "id", id, "hash", tx.Hash())
+ return errors.New("duplicate blob entry")
+ }
sender, err := p.signer.Sender(tx)
if err != nil {
// This path is impossible unless the signature validity changes across
- // restarts. For that ever unprobable case, recover gracefully by ignoring
+ // restarts. For that ever improbable case, recover gracefully by ignoring
// this data entry.
log.Error("Failed to recover blob tx sender", "id", id, "hash", tx.Hash(), "err", err)
return err
@@ -532,15 +542,17 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
}
delete(p.index, addr)
delete(p.spent, addr)
- if inclusions != nil { // only during reorgs will the heap will be initialized
+ if inclusions != nil { // only during reorgs will the heap be initialized
heap.Remove(p.evict, p.evict.index[addr])
}
p.reserve(addr, false)
if gapped {
log.Warn("Dropping dangling blob transactions", "from", addr, "missing", next, "drop", nonces, "ids", ids)
+ dropDanglingMeter.Mark(int64(len(ids)))
} else {
log.Trace("Dropping filled blob transactions", "from", addr, "filled", nonces, "ids", ids)
+ dropFilledMeter.Mark(int64(len(ids)))
}
for _, id := range ids {
if err := p.store.Delete(id); err != nil {
@@ -571,6 +583,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
txs = txs[1:]
}
log.Trace("Dropping overlapped blob transactions", "from", addr, "overlapped", nonces, "ids", ids, "left", len(txs))
+ dropOverlappedMeter.Mark(int64(len(ids)))
+
for _, id := range ids {
if err := p.store.Delete(id); err != nil {
log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -602,10 +616,30 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
}
continue
}
- // Sanity check that there's no double nonce. This case would be a coding
- // error, but better know about it
+ // Sanity check that there's no double nonce. This case would generally
+ // be a coding error, so better know about it.
+ //
+ // Also, Billy behind the blobpool does not journal deletes. A process
+ // crash would result in previously deleted entities being resurrected.
+ // That could potentially cause a duplicate nonce to appear.
if txs[i].nonce == txs[i-1].nonce {
- log.Error("Duplicate nonce blob transaction", "from", addr, "nonce", txs[i].nonce)
+ id := p.lookup[txs[i].hash]
+
+ log.Error("Dropping repeat nonce blob transaction", "from", addr, "nonce", txs[i].nonce, "id", id)
+ dropRepeatedMeter.Mark(1)
+
+ p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap)
+ p.stored -= uint64(txs[i].size)
+ delete(p.lookup, txs[i].hash)
+
+ if err := p.store.Delete(id); err != nil {
+ log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
+ }
+ txs = append(txs[:i], txs[i+1:]...)
+ p.index[addr] = txs
+
+ i--
+ continue
}
// Otherwise if there's a nonce gap evict all later transactions
var (
@@ -623,6 +657,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
txs = txs[:i]
log.Error("Dropping gapped blob transactions", "from", addr, "missing", txs[i-1].nonce+1, "drop", nonces, "ids", ids)
+ dropGappedMeter.Mark(int64(len(ids)))
+
for _, id := range ids {
if err := p.store.Delete(id); err != nil {
log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -659,7 +695,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
if len(txs) == 0 {
delete(p.index, addr)
delete(p.spent, addr)
- if inclusions != nil { // only during reorgs will the heap will be initialized
+ if inclusions != nil { // only during reorgs will the heap be initialized
heap.Remove(p.evict, p.evict.index[addr])
}
p.reserve(addr, false)
@@ -667,6 +703,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
p.index[addr] = txs
}
log.Warn("Dropping overdrafted blob transactions", "from", addr, "balance", balance, "spent", spent, "drop", nonces, "ids", ids)
+ dropOverdraftedMeter.Mark(int64(len(ids)))
+
for _, id := range ids {
if err := p.store.Delete(id); err != nil {
log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -697,6 +735,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
p.index[addr] = txs
log.Warn("Dropping overcapped blob transactions", "from", addr, "kept", len(txs), "drop", nonces, "ids", ids)
+ dropOvercappedMeter.Mark(int64(len(ids)))
+
for _, id := range ids {
if err := p.store.Delete(id); err != nil {
log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
@@ -713,7 +753,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
// offload removes a tracked blob transaction from the pool and moves it into the
// limbo for tracking until finality.
//
-// The method may log errors for various unexpcted scenarios but will not return
+// The method may log errors for various unexpected scenarios but will not return
// any of it since there's no clear error case. Some errors may be due to coding
// issues, others caused by signers mining MEV stuff or swapping transactions. In
// all cases, the pool needs to continue operating.
@@ -771,7 +811,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
}
}
// Recheck the account's pooled transactions to drop included and
- // invalidated one
+ // invalidated ones
p.recheck(addr, inclusions)
}
if len(adds) > 0 {
@@ -954,7 +994,7 @@ func (p *BlobPool) reinject(addr common.Address, txhash common.Hash) error {
return err
}
- // Update the indixes and metrics
+ // Update the indices and metrics
meta := newBlobTxMeta(id, p.store.Size(id), tx)
if _, ok := p.index[addr]; !ok {
if err := p.reserve(addr, true); err != nil {
@@ -1021,6 +1061,8 @@ func (p *BlobPool) SetGasTip(tip *big.Int) {
}
// Clear out the transactions from the data store
log.Warn("Dropping underpriced blob transaction", "from", addr, "rejected", tx.nonce, "tip", tx.execTipCap, "want", tip, "drop", nonces, "ids", ids)
+ dropUnderpricedMeter.Mark(int64(len(ids)))
+
for _, id := range ids {
if err := p.store.Delete(id); err != nil {
log.Error("Failed to delete dropped transaction", "id", id, "err", err)
@@ -1163,7 +1205,7 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
}
// Add inserts a set of blob transactions into the pool if they pass validation (both
-// consensus validity and pool restictions).
+// consensus validity and pool restrictions).
func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
var (
adds = make([]*types.Transaction, 0, len(txs))
@@ -1183,10 +1225,10 @@ func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error
}
// Add inserts a new blob transaction into the pool if it passes validation (both
-// consensus validity and pool restictions).
+// consensus validity and pool restrictions).
func (p *BlobPool) add(tx *types.Transaction) (err error) {
// The blob pool blocks on adding a transaction. This is because blob txs are
- // only even pulled form the network, so this method will act as the overload
+	// only ever pulled from the network, so this method will act as the overload
// protection for fetches.
waitStart := time.Now()
p.lock.Lock()
@@ -1200,6 +1242,22 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
// Ensure the transaction is valid from all perspectives
if err := p.validateTx(tx); err != nil {
log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
+ switch {
+ case errors.Is(err, txpool.ErrUnderpriced):
+ addUnderpricedMeter.Mark(1)
+ case errors.Is(err, core.ErrNonceTooLow):
+ addStaleMeter.Mark(1)
+ case errors.Is(err, core.ErrNonceTooHigh):
+ addGappedMeter.Mark(1)
+ case errors.Is(err, core.ErrInsufficientFunds):
+ addOverdraftedMeter.Mark(1)
+ case errors.Is(err, txpool.ErrAccountLimitExceeded):
+ addOvercappedMeter.Mark(1)
+ case errors.Is(err, txpool.ErrReplaceUnderpriced):
+ addNoreplaceMeter.Mark(1)
+ default:
+ addInvalidMeter.Mark(1)
+ }
return err
}
// If the address is not yet known, request exclusivity to track the account
@@ -1207,6 +1265,7 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
from, _ := types.Sender(p.signer, tx) // already validated above
if _, ok := p.index[from]; !ok {
if err := p.reserve(from, true); err != nil {
+ addNonExclusiveMeter.Mark(1)
return err
}
defer func() {
@@ -1246,6 +1305,8 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
}
if len(p.index[from]) > offset {
// Transaction replaces a previously queued one
+ dropReplacedMeter.Mark(1)
+
prev := p.index[from][offset]
if err := p.store.Delete(prev.id); err != nil {
// Shitty situation, but try to recover gracefully instead of going boom
@@ -1324,6 +1385,7 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
}
p.updateStorageMetrics()
+ addValidMeter.Mark(1)
return nil
}
@@ -1373,7 +1435,9 @@ func (p *BlobPool) drop() {
}
}
// Remove the transaction from the data store
- log.Warn("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
+ log.Debug("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
+ dropOverflownMeter.Mark(1)
+
if err := p.store.Delete(drop.id); err != nil {
log.Error("Failed to drop evicted transaction", "id", drop.id, "err", err)
}
@@ -1381,7 +1445,15 @@ func (p *BlobPool) drop() {
// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce.
-func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+//
+// The transactions can also be pre-filtered by the dynamic fee components to
+// reduce allocations and load on downstream subsystems.
+func (p *BlobPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
+ // If only plain transactions are requested, this pool is unsuitable as it
+ // contains none, don't even bother.
+ if filter.OnlyPlainTxs {
+ return nil
+ }
// Track the amount of time waiting to retrieve the list of pending blob txs
// from the pool and the amount of time actually spent on assembling the data.
// The latter will be pretty much moot, but we've kept it to have symmetric
@@ -1391,20 +1463,40 @@ func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTr
pendwaitHist.Update(time.Since(pendStart).Nanoseconds())
defer p.lock.RUnlock()
- defer func(start time.Time) {
- pendtimeHist.Update(time.Since(start).Nanoseconds())
- }(time.Now())
+ execStart := time.Now()
+ defer func() {
+ pendtimeHist.Update(time.Since(execStart).Nanoseconds())
+ }()
- pending := make(map[common.Address][]*txpool.LazyTransaction)
+ pending := make(map[common.Address][]*txpool.LazyTransaction, len(p.index))
for addr, txs := range p.index {
- var lazies []*txpool.LazyTransaction
+ lazies := make([]*txpool.LazyTransaction, 0, len(txs))
for _, tx := range txs {
+ // If transaction filtering was requested, discard badly priced ones
+ if filter.MinTip != nil && filter.BaseFee != nil {
+ if tx.execFeeCap.Lt(filter.BaseFee) {
+ break // basefee too low, cannot be included, discard rest of txs from the account
+ }
+ tip := new(uint256.Int).Sub(tx.execFeeCap, filter.BaseFee)
+ if tip.Gt(tx.execTipCap) {
+ tip = tx.execTipCap
+ }
+ if tip.Lt(filter.MinTip) {
+ break // allowed or remaining tip too low, cannot be included, discard rest of txs from the account
+ }
+ }
+ if filter.BlobFee != nil {
+ if tx.blobFeeCap.Lt(filter.BlobFee) {
+ break // blobfee too low, cannot be included, discard rest of txs from the account
+ }
+ }
+ // Transaction was accepted according to the filter, append to the pending list
lazies = append(lazies, &txpool.LazyTransaction{
Pool: p,
Hash: tx.hash,
- Time: time.Now(), // TODO(karalabe): Maybe save these and use that?
- GasFeeCap: tx.execFeeCap.ToBig(),
- GasTipCap: tx.execTipCap.ToBig(),
+ Time: execStart, // TODO(karalabe): Maybe save these and use that?
+ GasFeeCap: tx.execFeeCap,
+ GasTipCap: tx.execTipCap,
Gas: tx.execGas,
BlobGas: tx.blobGas,
})
@@ -1464,7 +1556,7 @@ func (p *BlobPool) updateStorageMetrics() {
}
// updateLimboMetrics retrieves a bunch of stats from the limbo store and pushes
-// // them out as metrics.
+// them out as metrics.
func (p *BlobPool) updateLimboMetrics() {
stats := p.limbo.store.Infos()
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index e4fc67f352..3e465a9214 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -185,7 +185,7 @@ func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64,
return types.MustSignNewTx(key, types.LatestSigner(testChainConfig), blobtx)
}
-// makeUnsignedTx is a utility method to construct a random blob tranasaction
+// makeUnsignedTx is a utility method to construct a random blob transaction
// without signing it.
func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
return &types.BlobTx{
@@ -305,7 +305,16 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
// - 1. A transaction that cannot be decoded must be dropped
// - 2. A transaction that cannot be recovered (bad signature) must be dropped
// - 3. All transactions after a nonce gap must be dropped
-// - 4. All transactions after an underpriced one (including it) must be dropped
+// - 4. All transactions after an already included nonce must be dropped
+// - 5. All transactions after an underpriced one (including it) must be dropped
+// - 6. All transactions after an overdrafting sequence must be dropped
+// - 7. All transactions exceeding the per-account limit must be dropped
+//
+// Furthermore, some strange corner-cases can also occur after a crash, as Billy's
+// simplicity also allows it to resurrect past deleted entities:
+//
+// - 8. Fully duplicate transactions (matching hash) must be dropped
+// - 9. Duplicate nonces from the same account must be dropped
func TestOpenDrops(t *testing.T) {
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
@@ -338,7 +347,7 @@ func TestOpenDrops(t *testing.T) {
badsig, _ := store.Put(blob)
// Insert a sequence of transactions with a nonce gap in between to verify
- // that anything gapped will get evicted (case 3)
+ // that anything gapped will get evicted (case 3).
var (
gapper, _ = crypto.GenerateKey()
@@ -357,7 +366,7 @@ func TestOpenDrops(t *testing.T) {
}
}
// Insert a sequence of transactions with a gapped starting nonce to verify
- // that the entire set will get dropped.
+ // that the entire set will get dropped (case 3).
var (
dangler, _ = crypto.GenerateKey()
dangling = make(map[uint64]struct{})
@@ -370,7 +379,7 @@ func TestOpenDrops(t *testing.T) {
dangling[id] = struct{}{}
}
-	// Insert a sequence of transactions with already passed nonces to veirfy
-	// that the entire set will get dropped.
+	// Insert a sequence of transactions with already passed nonces to verify
+	// that the entire set will get dropped (case 4).
var (
filler, _ = crypto.GenerateKey()
filled = make(map[uint64]struct{})
@@ -382,8 +391,8 @@ func TestOpenDrops(t *testing.T) {
id, _ := store.Put(blob)
filled[id] = struct{}{}
}
- // Insert a sequence of transactions with partially passed nonces to veirfy
- // that the included part of the set will get dropped
+ // Insert a sequence of transactions with partially passed nonces to verify
+ // that the included part of the set will get dropped (case 4).
var (
overlapper, _ = crypto.GenerateKey()
overlapped = make(map[uint64]struct{})
@@ -400,7 +409,7 @@ func TestOpenDrops(t *testing.T) {
}
}
// Insert a sequence of transactions with an underpriced first to verify that
- // the entire set will get dropped (case 4).
+ // the entire set will get dropped (case 5).
var (
underpayer, _ = crypto.GenerateKey()
underpaid = make(map[uint64]struct{})
@@ -419,7 +428,7 @@ func TestOpenDrops(t *testing.T) {
}
// Insert a sequence of transactions with an underpriced in between to verify
- // that it and anything newly gapped will get evicted (case 4).
+ // that it and anything newly gapped will get evicted (case 5).
var (
outpricer, _ = crypto.GenerateKey()
outpriced = make(map[uint64]struct{})
@@ -441,7 +450,7 @@ func TestOpenDrops(t *testing.T) {
}
}
// Insert a sequence of transactions fully overdrafted to verify that the
- // entire set will get invalidated.
+ // entire set will get invalidated (case 6).
var (
exceeder, _ = crypto.GenerateKey()
exceeded = make(map[uint64]struct{})
@@ -459,7 +468,7 @@ func TestOpenDrops(t *testing.T) {
exceeded[id] = struct{}{}
}
// Insert a sequence of transactions partially overdrafted to verify that part
- // of the set will get invalidated.
+ // of the set will get invalidated (case 6).
var (
overdrafter, _ = crypto.GenerateKey()
overdrafted = make(map[uint64]struct{})
@@ -481,7 +490,7 @@ func TestOpenDrops(t *testing.T) {
}
}
// Insert a sequence of transactions overflowing the account cap to verify
- // that part of the set will get invalidated.
+ // that part of the set will get invalidated (case 7).
var (
overcapper, _ = crypto.GenerateKey()
overcapped = make(map[uint64]struct{})
@@ -496,6 +505,42 @@ func TestOpenDrops(t *testing.T) {
overcapped[id] = struct{}{}
}
}
+ // Insert a batch of duplicated transactions to verify that only one of each
+ // version will remain (case 8).
+ var (
+ duplicater, _ = crypto.GenerateKey()
+ duplicated = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} {
+ blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, 1, 1, duplicater))
+
+ for i := 0; i < int(nonce)+1; i++ {
+ id, _ := store.Put(blob)
+ if i == 0 {
+ valids[id] = struct{}{}
+ } else {
+ duplicated[id] = struct{}{}
+ }
+ }
+ }
+ // Insert a batch of duplicated nonces to verify that only one of each will
+ // remain (case 9).
+ var (
+ repeater, _ = crypto.GenerateKey()
+ repeated = make(map[uint64]struct{})
+ )
+ for _, nonce := range []uint64{0, 1, 2} {
+ for i := 0; i < int(nonce)+1; i++ {
+ blob, _ := rlp.EncodeToBytes(makeTx(nonce, 1, uint64(i)+1 /* unique hashes */, 1, repeater))
+
+ id, _ := store.Put(blob)
+ if i == 0 {
+ valids[id] = struct{}{}
+ } else {
+ repeated[id] = struct{}{}
+ }
+ }
+ }
store.Close()
// Create a blob pool out of the pre-seeded data
@@ -511,6 +556,8 @@ func TestOpenDrops(t *testing.T) {
statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), uint256.NewInt(1000000))
statedb.AddBalance(crypto.PubkeyToAddress(overdrafter.PublicKey), uint256.NewInt(1000000))
statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000))
+ statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000))
statedb.Finalise(true)
statedb.AccountsIntermediateRoot()
statedb.Commit(0, nil)
@@ -522,7 +569,7 @@ func TestOpenDrops(t *testing.T) {
statedb: statedb,
}
pool := New(Config{Datadir: storage}, chain)
- if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
t.Fatalf("failed to create blob pool: %v", err)
}
defer pool.Close()
@@ -556,6 +603,10 @@ func TestOpenDrops(t *testing.T) {
t.Errorf("partially overdrafted transaction remained in storage: %d", tx.id)
} else if _, ok := overcapped[tx.id]; ok {
t.Errorf("overcapped transaction remained in storage: %d", tx.id)
+ } else if _, ok := duplicated[tx.id]; ok {
+ t.Errorf("duplicated transaction remained in storage: %d", tx.id)
+ } else if _, ok := repeated[tx.id]; ok {
+ t.Errorf("repeated nonce transaction remained in storage: %d", tx.id)
} else {
alive[tx.id] = struct{}{}
}
@@ -586,7 +637,7 @@ func TestOpenDrops(t *testing.T) {
// Tests that transactions loaded from disk are indexed correctly.
//
-// - 1. Transactions must be groupped by sender, sorted by nonce
+// - 1. Transactions must be grouped by sender, sorted by nonce
// - 2. Eviction thresholds are calculated correctly for the sequences
// - 3. Balance usage of an account is totals across all transactions
func TestOpenIndex(t *testing.T) {
@@ -600,7 +651,7 @@ func TestOpenIndex(t *testing.T) {
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
// Insert a sequence of transactions with varying price points to check that
- // the cumulative minimumw will be maintained.
+ // the cumulative minimum will be maintained.
var (
key, _ = crypto.GenerateKey()
addr = crypto.PubkeyToAddress(key.PublicKey)
@@ -639,7 +690,7 @@ func TestOpenIndex(t *testing.T) {
statedb: statedb,
}
pool := New(Config{Datadir: storage}, chain)
- if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
t.Fatalf("failed to create blob pool: %v", err)
}
defer pool.Close()
@@ -743,7 +794,7 @@ func TestOpenHeap(t *testing.T) {
statedb: statedb,
}
pool := New(Config{Datadir: storage}, chain)
- if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
t.Fatalf("failed to create blob pool: %v", err)
}
defer pool.Close()
@@ -825,7 +876,7 @@ func TestOpenCap(t *testing.T) {
statedb: statedb,
}
pool := New(Config{Datadir: storage, Datacap: datacap}, chain)
- if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
t.Fatalf("failed to create blob pool: %v", err)
}
// Verify that enough transactions have been dropped to get the pool's size
@@ -1185,6 +1236,24 @@ func TestAdd(t *testing.T) {
},
},
},
+ // Blob transactions that don't meet the min blob gas price should be rejected
+ {
+ seeds: map[string]seed{
+ "alice": {balance: 10000000},
+ },
+ adds: []addtx{
+ { // New account, no previous txs, nonce 0, but blob fee cap too low
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 1, 0),
+ err: txpool.ErrUnderpriced,
+ },
+ { // Same as above but blob fee cap equals minimum, should be accepted
+ from: "alice",
+ tx: makeUnsignedTx(0, 1, 1, params.BlobTxMinBlobGasprice),
+ err: nil,
+ },
+ },
+ },
}
for i, tt := range tests {
// Create a temporary folder for the persistent backend
@@ -1205,7 +1274,7 @@ func TestAdd(t *testing.T) {
keys[acc], _ = crypto.GenerateKey()
addrs[acc] = crypto.PubkeyToAddress(keys[acc].PublicKey)
- // Seed the state database with this acocunt
+ // Seed the state database with this account
statedb.AddBalance(addrs[acc], new(uint256.Int).SetUint64(seed.balance))
statedb.SetNonce(addrs[acc], seed.nonce)
@@ -1229,7 +1298,7 @@ func TestAdd(t *testing.T) {
statedb: statedb,
}
pool := New(Config{Datadir: storage}, chain)
- if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
t.Fatalf("test %d: failed to create blob pool: %v", i, err)
}
verifyPoolInternals(t, pool)
@@ -1247,3 +1316,67 @@ func TestAdd(t *testing.T) {
pool.Close()
}
}
+
+// Benchmarks the time it takes to assemble the lazy pending transaction list
+// from the pool contents.
+func BenchmarkPoolPending100Mb(b *testing.B) { benchmarkPoolPending(b, 100_000_000) }
+func BenchmarkPoolPending1GB(b *testing.B) { benchmarkPoolPending(b, 1_000_000_000) }
+func BenchmarkPoolPending10GB(b *testing.B) { benchmarkPoolPending(b, 10_000_000_000) }
+
+func benchmarkPoolPending(b *testing.B, datacap uint64) {
+	// Calculate the maximum number of transactions that would fit into the pool
+ // and generate a set of random accounts to seed them with.
+ capacity := datacap / params.BlobTxBlobGasPerBlob
+
+ var (
+ basefee = uint64(1050)
+ blobfee = uint64(105)
+ signer = types.LatestSigner(testChainConfig)
+ statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
+ chain = &testBlockChain{
+ config: testChainConfig,
+ basefee: uint256.NewInt(basefee),
+ blobfee: uint256.NewInt(blobfee),
+ statedb: statedb,
+ }
+ pool = New(Config{Datadir: ""}, chain)
+ )
+
+ if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
+ b.Fatalf("failed to create blob pool: %v", err)
+ }
+ // Fill the pool up with one random transaction from each account with the
+ // same price and everything to maximize the worst case scenario
+ for i := 0; i < int(capacity); i++ {
+ blobtx := makeUnsignedTx(0, 10, basefee+10, blobfee)
+ blobtx.R = uint256.NewInt(1)
+ blobtx.S = uint256.NewInt(uint64(100 + i))
+ blobtx.V = uint256.NewInt(0)
+ tx := types.NewTx(blobtx)
+ addr, err := types.Sender(signer, tx)
+ if err != nil {
+ b.Fatal(err)
+ }
+ statedb.AddBalance(addr, uint256.NewInt(1_000_000_000))
+ pool.add(tx)
+ }
+ statedb.Finalise(true)
+ statedb.AccountsIntermediateRoot()
+ statedb.Commit(0, nil)
+ defer pool.Close()
+
+ // Benchmark assembling the pending
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ p := pool.Pending(txpool.PendingFilter{
+ MinTip: uint256.NewInt(1),
+ BaseFee: chain.basefee,
+ BlobFee: chain.blobfee,
+ })
+ if len(p) != int(capacity) {
+ b.Fatalf("have %d want %d", len(p), capacity)
+ }
+ }
+}
diff --git a/core/txpool/blobpool/config.go b/core/txpool/blobpool/config.go
index 99a2002a30..1d180739cd 100644
--- a/core/txpool/blobpool/config.go
+++ b/core/txpool/blobpool/config.go
@@ -30,8 +30,8 @@ type Config struct {
// DefaultConfig contains the default configurations for the transaction pool.
var DefaultConfig = Config{
Datadir: "blobpool",
- Datacap: 10 * 1024 * 1024 * 1024,
- PriceBump: 100, // either have patience or be aggressive, no mushy ground
+ Datacap: 10 * 1024 * 1024 * 1024 / 4, // TODO(karalabe): /4 handicap for rollout, gradually bump back up to 10GB
+ PriceBump: 100, // either have patience or be aggressive, no mushy ground
}
// sanitize checks the provided user configurations and changes anything that's
diff --git a/core/txpool/blobpool/evictheap.go b/core/txpool/blobpool/evictheap.go
index df594099f7..bc4543a352 100644
--- a/core/txpool/blobpool/evictheap.go
+++ b/core/txpool/blobpool/evictheap.go
@@ -30,7 +30,7 @@ import (
// transaction from each account to determine which account to evict from.
//
// The heap internally tracks a slice of cheapest transactions from each account
-// and a mapping from addresses to indices for direct removals/udates.
+// and a mapping from addresses to indices for direct removals/updates.
//
// The goal of the heap is to decide which account has the worst bottleneck to
// evict transactions from.
diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go
index d1fe9c7394..ec754f6894 100644
--- a/core/txpool/blobpool/limbo.go
+++ b/core/txpool/blobpool/limbo.go
@@ -53,7 +53,7 @@ func newLimbo(datadir string) (*limbo, error) {
index: make(map[common.Hash]uint64),
groups: make(map[uint64]map[uint64]common.Hash),
}
- // Index all limboed blobs on disk and delete anything inprocessable
+ // Index all limboed blobs on disk and delete anything unprocessable
var fails []uint64
index := func(id uint64, size uint32, data []byte) {
if l.parseBlob(id, data) != nil {
@@ -89,7 +89,7 @@ func (l *limbo) parseBlob(id uint64, data []byte) error {
item := new(limboBlob)
if err := rlp.DecodeBytes(data, item); err != nil {
// This path is impossible unless the disk data representation changes
- // across restarts. For that ever unprobable case, recover gracefully
+ // across restarts. For that ever improbable case, recover gracefully
// by ignoring this data entry.
log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
return err
@@ -172,7 +172,7 @@ func (l *limbo) pull(tx common.Hash) (*types.Transaction, error) {
// update changes the block number under which a blob transaction is tracked. This
// method should be used when a reorg changes a transaction's inclusion block.
//
-// The method may log errors for various unexpcted scenarios but will not return
+// The method may log errors for various unexpected scenarios but will not return
// any of it since there's no clear error case. Some errors may be due to coding
// issues, others caused by signers mining MEV stuff or swapping transactions. In
// all cases, the pool needs to continue operating.
diff --git a/core/txpool/blobpool/metrics.go b/core/txpool/blobpool/metrics.go
index 587804cc61..52419ade09 100644
--- a/core/txpool/blobpool/metrics.go
+++ b/core/txpool/blobpool/metrics.go
@@ -65,8 +65,8 @@ var (
pooltipGauge = metrics.NewRegisteredGauge("blobpool/pooltip", nil)
// addwait/time, resetwait/time and getwait/time track the rough health of
- // the pool and whether or not it's capable of keeping up with the load from
- // the network.
+ // the pool and whether it's capable of keeping up with the load from the
+ // network.
addwaitHist = metrics.NewRegisteredHistogram("blobpool/addwait", nil, metrics.NewExpDecaySample(1028, 0.015))
addtimeHist = metrics.NewRegisteredHistogram("blobpool/addtime", nil, metrics.NewExpDecaySample(1028, 0.015))
getwaitHist = metrics.NewRegisteredHistogram("blobpool/getwait", nil, metrics.NewExpDecaySample(1028, 0.015))
@@ -75,4 +75,31 @@ var (
pendtimeHist = metrics.NewRegisteredHistogram("blobpool/pendtime", nil, metrics.NewExpDecaySample(1028, 0.015))
resetwaitHist = metrics.NewRegisteredHistogram("blobpool/resetwait", nil, metrics.NewExpDecaySample(1028, 0.015))
resettimeHist = metrics.NewRegisteredHistogram("blobpool/resettime", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+ // The below metrics track various cases where transactions are dropped out
+ // of the pool. Most are exceptional, some are chain progression and some
+ // threshold cappings.
+ dropInvalidMeter = metrics.NewRegisteredMeter("blobpool/drop/invalid", nil) // Invalid transaction, consensus change or bugfix, neutral-ish
+ dropDanglingMeter = metrics.NewRegisteredMeter("blobpool/drop/dangling", nil) // First nonce gapped, bad
+ dropFilledMeter = metrics.NewRegisteredMeter("blobpool/drop/filled", nil) // State full-overlap, chain progress, ok
+ dropOverlappedMeter = metrics.NewRegisteredMeter("blobpool/drop/overlapped", nil) // State partial-overlap, chain progress, ok
+ dropRepeatedMeter = metrics.NewRegisteredMeter("blobpool/drop/repeated", nil) // Repeated nonce, bad
+ dropGappedMeter = metrics.NewRegisteredMeter("blobpool/drop/gapped", nil) // Non-first nonce gapped, bad
+ dropOverdraftedMeter = metrics.NewRegisteredMeter("blobpool/drop/overdrafted", nil) // Balance exceeded, bad
+ dropOvercappedMeter = metrics.NewRegisteredMeter("blobpool/drop/overcapped", nil) // Per-account cap exceeded, bad
+ dropOverflownMeter = metrics.NewRegisteredMeter("blobpool/drop/overflown", nil) // Global disk cap exceeded, neutral-ish
+ dropUnderpricedMeter = metrics.NewRegisteredMeter("blobpool/drop/underpriced", nil) // Gas tip changed, neutral
+ dropReplacedMeter = metrics.NewRegisteredMeter("blobpool/drop/replaced", nil) // Transaction replaced, neutral
+
+ // The below metrics track various outcomes of transactions being added to
+ // the pool.
+ addInvalidMeter = metrics.NewRegisteredMeter("blobpool/add/invalid", nil) // Invalid transaction, reject, neutral
+ addUnderpricedMeter = metrics.NewRegisteredMeter("blobpool/add/underpriced", nil) // Gas tip too low, neutral
+ addStaleMeter = metrics.NewRegisteredMeter("blobpool/add/stale", nil) // Nonce already filled, reject, bad-ish
+ addGappedMeter = metrics.NewRegisteredMeter("blobpool/add/gapped", nil) // Nonce gapped, reject, bad-ish
+ addOverdraftedMeter = metrics.NewRegisteredMeter("blobpool/add/overdrafted", nil) // Balance exceeded, reject, neutral
+ addOvercappedMeter = metrics.NewRegisteredMeter("blobpool/add/overcapped", nil) // Per-account cap exceeded, reject, neutral
+ addNoreplaceMeter = metrics.NewRegisteredMeter("blobpool/add/noreplace", nil) // Replacement fees or tips too low, neutral
+ addNonExclusiveMeter = metrics.NewRegisteredMeter("blobpool/add/nonexclusive", nil) // Plain transaction from same account exists, reject, neutral
+ addValidMeter = metrics.NewRegisteredMeter("blobpool/add/valid", nil) // Valid transaction, add, neutral
)
diff --git a/core/txpool/blobpool/priority_test.go b/core/txpool/blobpool/priority_test.go
index 4aad919925..cf0e0454a0 100644
--- a/core/txpool/blobpool/priority_test.go
+++ b/core/txpool/blobpool/priority_test.go
@@ -64,7 +64,7 @@ func BenchmarkDynamicFeeJumpCalculation(b *testing.B) {
// Benchmarks how many priority recalculations can be done.
func BenchmarkPriorityCalculation(b *testing.B) {
// The basefee and blob fee is constant for all transactions across a block,
- // so we can assume theit absolute jump counts can be pre-computed.
+ // so we can assume their absolute jump counts can be pre-computed.
basefee := uint256.NewInt(17_200_000_000) // 17.2 Gwei is the 22.03.2023 zero-emission basefee, random number
blobfee := uint256.NewInt(123_456_789_000) // Completely random, no idea what this will be
diff --git a/core/txpool/errors.go b/core/txpool/errors.go
index 61daa999ff..3a6a913976 100644
--- a/core/txpool/errors.go
+++ b/core/txpool/errors.go
@@ -54,4 +54,10 @@ var (
// ErrFutureReplacePending is returned if a future transaction replaces a pending
// one. Future transactions should only be able to replace other future transactions.
ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
+
+ // ErrAlreadyReserved is returned if the sender address has a pending transaction
+ // in a different subpool. For example, this error is returned in response to any
+ // input transaction of non-blob type when a blob transaction from this sender
+ // remains pending (and vice-versa).
+ ErrAlreadyReserved = errors.New("address already reserved")
)
diff --git a/core/txpool/legacypool/journal.go b/core/txpool/legacypool/journal.go
index f04ab8fc14..899ed00bcc 100644
--- a/core/txpool/legacypool/journal.go
+++ b/core/txpool/legacypool/journal.go
@@ -164,7 +164,12 @@ func (journal *journal) rotate(all map[common.Address]types.Transactions) error
return err
}
journal.writer = sink
- log.Info("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all))
+
+ logger := log.Info
+ if len(all) == 0 {
+ logger = log.Debug
+ }
+ logger("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all))
return nil
}
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
index b67abaa2b7..8134848f1e 100644
--- a/core/txpool/legacypool/legacypool.go
+++ b/core/txpool/legacypool/legacypool.go
@@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
+ "github.com/holiman/uint256"
)
const (
@@ -214,7 +215,7 @@ type LegacyPool struct {
config Config
chainconfig *params.ChainConfig
chain BlockChain
- gasTip atomic.Pointer[big.Int]
+ gasTip atomic.Pointer[uint256.Int]
txFeed event.Feed
reannoTxFeed event.Feed // Event feed for announcing transactions again
scope event.SubscriptionScope
@@ -301,15 +302,15 @@ func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
// head to allow balance / nonce checks. The transaction journal will be loaded
// from disk and filtered based on the provided starting settings. The internal
// goroutines will be spun up and the pool deemed operational afterwards.
-func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error {
+func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error {
// Set the address reserver to request exclusive access to pooled accounts
pool.reserve = reserve
// Set the basic pool parameters
- pool.gasTip.Store(gasTip)
+ pool.gasTip.Store(uint256.NewInt(gasTip))
// Initialize the state with head block, or fallback to empty one in
- // case the head state is not available(might occur when node is not
+ // case the head state is not available (might occur when node is not
// fully synced).
statedb, err := pool.chain.StateAt(head.Root)
if err != nil {
@@ -482,11 +483,13 @@ func (pool *LegacyPool) SetGasTip(tip *big.Int) {
pool.mu.Lock()
defer pool.mu.Unlock()
- old := pool.gasTip.Load()
- pool.gasTip.Store(new(big.Int).Set(tip))
-
+ var (
+ newTip = uint256.MustFromBig(tip)
+ old = pool.gasTip.Load()
+ )
+ pool.gasTip.Store(newTip)
// If the min miner fee increased, remove transactions below the new threshold
- if tip.Cmp(old) > 0 {
+ if newTip.Cmp(old) > 0 {
// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
drop := pool.all.RemotesBelowTip(tip)
for _, tx := range drop {
@@ -494,7 +497,7 @@ func (pool *LegacyPool) SetGasTip(tip *big.Int) {
}
pool.priced.Removed(len(drop))
}
- log.Info("Legacy pool tip threshold updated", "tip", tip)
+ log.Info("Legacy pool tip threshold updated", "tip", newTip)
}
// Nonce returns the next nonce of an account, with all transactions executable
@@ -564,24 +567,38 @@ func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction,
}
// Pending retrieves all currently processable transactions, grouped by origin
-// account and sorted by nonce. The returned transaction set is a copy and can be
-// freely modified by calling code.
+// account and sorted by nonce.
//
-// The enforceTips parameter can be used to do an extra filtering on the pending
-// transactions and only return those whose **effective** tip is large enough in
-// the next pending execution environment.
-func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+// The transactions can also be pre-filtered by the dynamic fee components to
+// reduce allocations and load on downstream subsystems.
+func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
+ // If only blob transactions are requested, this pool is unsuitable as it
+ // contains none, don't even bother.
+ if filter.OnlyBlobTxs {
+ return nil
+ }
pool.mu.Lock()
defer pool.mu.Unlock()
+ // Convert the new uint256.Int types to the old big.Int ones used by the legacy pool
+ var (
+ minTipBig *big.Int
+ baseFeeBig *big.Int
+ )
+ if filter.MinTip != nil {
+ minTipBig = filter.MinTip.ToBig()
+ }
+ if filter.BaseFee != nil {
+ baseFeeBig = filter.BaseFee.ToBig()
+ }
pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending))
for addr, list := range pool.pending {
txs := list.Flatten()
// If the miner requests tip enforcement, cap the lists now
- if enforceTips && !pool.locals.contains(addr) {
+ if minTipBig != nil && !pool.locals.contains(addr) {
for i, tx := range txs {
- if tx.EffectiveGasTipIntCmp(pool.gasTip.Load(), nil) < 0 {
+ if tx.EffectiveGasTipIntCmp(minTipBig, baseFeeBig) < 0 {
txs = txs[:i]
break
}
@@ -595,8 +612,8 @@ func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.L
Hash: txs[i].Hash(),
Tx: txs[i],
Time: txs[i].Time(),
- GasFeeCap: txs[i].GasFeeCap(),
- GasTipCap: txs[i].GasTipCap(),
+ GasFeeCap: uint256.MustFromBig(txs[i].GasFeeCap()),
+ GasTipCap: uint256.MustFromBig(txs[i].GasTipCap()),
Gas: txs[i].Gas(),
BlobGas: txs[i].BlobGas(),
}
@@ -654,7 +671,7 @@ func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) erro
1< threshold; size-- {
drops = append(drops, m.items[(*m.index)[size-1]])
delete(m.items, (*m.index)[size-1])
}
*m.index = (*m.index)[:threshold]
- heap.Init(m.index)
+ // The sorted m.index slice is still a valid heap, so there is no need to
+ // reheap after deleting tail items.
// If we had a cache, shift the back
m.cacheMu.Lock()
@@ -297,19 +299,19 @@ type list struct {
strict bool // Whether nonces are strictly continuous or not
txs *sortedMap // Heap indexed sorted hash map of the transactions
- costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance)
- gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit)
- totalcost *big.Int // Total cost of all transactions in the list
+ costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance)
+ gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit)
+ totalcost *uint256.Int // Total cost of all transactions in the list
}
-// newList create a new transaction list for maintaining nonce-indexable fast,
+// newList creates a new transaction list for maintaining nonce-indexable fast,
// gapped, sortable transaction lists.
func newList(strict bool) *list {
return &list{
strict: strict,
txs: newSortedMap(),
- costcap: new(big.Int),
- totalcost: new(big.Int),
+ costcap: new(uint256.Int),
+ totalcost: new(uint256.Int),
}
}
@@ -351,10 +353,15 @@ func (l *list) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transa
l.subTotalCost([]*types.Transaction{old})
}
// Add new tx cost to totalcost
- l.totalcost.Add(l.totalcost, tx.Cost())
+ cost, overflow := uint256.FromBig(tx.Cost())
+ if overflow {
+ return false, nil
+ }
+ l.totalcost.Add(l.totalcost, cost)
+
// Otherwise overwrite the old transaction with the current one
l.txs.Put(tx)
- if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 {
+ if l.costcap.Cmp(cost) < 0 {
l.costcap = cost
}
if gas := tx.Gas(); l.gascap < gas {
@@ -381,17 +388,17 @@ func (l *list) Forward(threshold uint64) types.Transactions {
// a point in calculating all the costs or if the balance covers all. If the threshold
// is lower than the costgas cap, the caps will be reset to a new high after removing
// the newly invalidated transactions.
-func (l *list) Filter(costLimit *big.Int, gasLimit uint64) (types.Transactions, types.Transactions) {
+func (l *list) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transactions, types.Transactions) {
// If all transactions are below the threshold, short circuit
if l.costcap.Cmp(costLimit) <= 0 && l.gascap <= gasLimit {
return nil, nil
}
- l.costcap = new(big.Int).Set(costLimit) // Lower the caps to the thresholds
+ l.costcap = new(uint256.Int).Set(costLimit) // Lower the caps to the thresholds
l.gascap = gasLimit
// Filter out all the transactions above the account's funds
removed := l.txs.Filter(func(tx *types.Transaction) bool {
- return tx.Gas() > gasLimit || tx.Cost().Cmp(costLimit) > 0
+ return tx.Gas() > gasLimit || tx.Cost().Cmp(costLimit.ToBig()) > 0
})
if len(removed) == 0 {
@@ -482,7 +489,10 @@ func (l *list) LastElement() *types.Transaction {
// total cost of all transactions.
func (l *list) subTotalCost(txs []*types.Transaction) {
for _, tx := range txs {
- l.totalcost.Sub(l.totalcost, tx.Cost())
+ _, underflow := l.totalcost.SubOverflow(l.totalcost, uint256.MustFromBig(tx.Cost()))
+ if underflow {
+ panic("totalcost underflow")
+ }
}
}
diff --git a/core/txpool/legacypool/list_test.go b/core/txpool/legacypool/list_test.go
index b5cd34b23b..8587c66f7d 100644
--- a/core/txpool/legacypool/list_test.go
+++ b/core/txpool/legacypool/list_test.go
@@ -21,8 +21,10 @@ import (
"math/rand"
"testing"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/holiman/uint256"
)
// Tests that transactions can be added to strict lists and list contents and
@@ -51,6 +53,21 @@ func TestStrictListAdd(t *testing.T) {
}
}
+// TestListAddVeryExpensive tests adding txs which exceed 256 bits in cost. It is
+// expected that the list does not panic.
+func TestListAddVeryExpensive(t *testing.T) {
+ key, _ := crypto.GenerateKey()
+ list := newList(true)
+ for i := 0; i < 3; i++ {
+ value := big.NewInt(100)
+ gasprice, _ := new(big.Int).SetString("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 0)
+ gaslimit := uint64(i)
+ tx, _ := types.SignTx(types.NewTransaction(uint64(i), common.Address{}, value, gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
+ t.Logf("cost: %x bitlen: %d\n", tx.Cost(), tx.Cost().BitLen())
+ list.Add(tx, DefaultConfig.PriceBump)
+ }
+}
+
func BenchmarkListAdd(b *testing.B) {
// Generate a list of transactions to insert
key, _ := crypto.GenerateKey()
@@ -60,7 +77,7 @@ func BenchmarkListAdd(b *testing.B) {
txs[i] = transaction(uint64(i), 0, key)
}
// Insert the transactions in a random order
- priceLimit := big.NewInt(int64(DefaultConfig.PriceLimit))
+ priceLimit := uint256.NewInt(DefaultConfig.PriceLimit)
b.ResetTimer()
for i := 0; i < b.N; i++ {
list := newList(true)
@@ -70,3 +87,25 @@ func BenchmarkListAdd(b *testing.B) {
}
}
}
+
+func BenchmarkListCapOneTx(b *testing.B) {
+ // Generate a list of transactions to insert
+ key, _ := crypto.GenerateKey()
+
+ txs := make(types.Transactions, 32)
+ for i := 0; i < len(txs); i++ {
+ txs[i] = transaction(uint64(i), 0, key)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ list := newList(true)
+ // Insert the transactions in a random order
+ for _, v := range rand.Perm(len(txs)) {
+ list.Add(txs[v], DefaultConfig.PriceBump)
+ }
+ b.StartTimer()
+ list.Cap(list.Len() - 1)
+ b.StopTimer()
+ }
+}
diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go
index 3f644c88cf..ac1a4962fe 100644
--- a/core/txpool/subpool.go
+++ b/core/txpool/subpool.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
+ "github.com/holiman/uint256"
)
// LazyTransaction contains a small subset of the transaction properties that is
@@ -34,9 +35,9 @@ type LazyTransaction struct {
Hash common.Hash // Transaction hash to pull up if needed
Tx *types.Transaction // Transaction if already resolved
- Time time.Time // Time when the transaction was first seen
- GasFeeCap *big.Int // Maximum fee per gas the transaction may consume
- GasTipCap *big.Int // Maximum miner tip per gas the transaction can pay
+ Time time.Time // Time when the transaction was first seen
+ GasFeeCap *uint256.Int // Maximum fee per gas the transaction may consume
+ GasTipCap *uint256.Int // Maximum miner tip per gas the transaction can pay
Gas uint64 // Amount of gas required by the transaction
BlobGas uint64 // Amount of blob gas required by the transaction
@@ -44,11 +45,17 @@ type LazyTransaction struct {
// Resolve retrieves the full transaction belonging to a lazy handle if it is still
// maintained by the transaction pool.
+//
+// Note, the method will *not* cache the retrieved transaction if the original
+// pool has not cached it. The idea being, that if the tx was too big to insert
+// originally, silently saving it will cause more trouble down the line (and
+// indeed seems to have caused a memory bloat in the original implementation
+// which did just that).
func (ltx *LazyTransaction) Resolve() *types.Transaction {
- if ltx.Tx == nil {
- ltx.Tx = ltx.Pool.Get(ltx.Hash)
+ if ltx.Tx != nil {
+ return ltx.Tx
}
- return ltx.Tx
+ return ltx.Pool.Get(ltx.Hash)
}
// LazyResolver is a minimal interface needed for a transaction pool to satisfy
@@ -63,13 +70,28 @@ type LazyResolver interface {
// may request (and relinquish) exclusive access to certain addresses.
type AddressReserver func(addr common.Address, reserve bool) error
+// PendingFilter is a collection of filter rules to allow retrieving a subset
+// of transactions for announcement or mining.
+//
+// Note, the entries here are not arbitrary useful filters, rather each one has
+// a very specific call site in mind and each one can be evaluated very cheaply
+// by the pool implementations. Only add new ones that satisfy those constraints.
+type PendingFilter struct {
+ MinTip *uint256.Int // Minimum miner tip required to include a transaction
+ BaseFee *uint256.Int // Minimum 1559 basefee needed to include a transaction
+ BlobFee *uint256.Int // Minimum 4844 blobfee needed to include a blob transaction
+
+ OnlyPlainTxs bool // Return only plain EVM transactions (peer-join announces, block space filling)
+ OnlyBlobTxs bool // Return only blob transactions (block blob-space filling)
+}
+
// SubPool represents a specialized transaction pool that lives on its own (e.g.
// blob pool). Since independent of how many specialized pools we have, they do
// need to be updated in lockstep and assemble into one coherent view for block
// production, this interface defines the common methods that allow the primary
// transaction pool to manage the subpools.
type SubPool interface {
- // Filter is a selector used to decide whether a transaction whould be added
+ // Filter is a selector used to decide whether a transaction would be added
// to this particular subpool.
Filter(tx *types.Transaction) bool
@@ -80,7 +102,7 @@ type SubPool interface {
// These should not be passed as a constructor argument - nor should the pools
// start by themselves - in order to keep multiple subpools in lockstep with
// one another.
- Init(gasTip *big.Int, head *types.Header, reserve AddressReserver) error
+ Init(gasTip uint64, head *types.Header, reserve AddressReserver) error
// Close terminates any background processing threads and releases any held
// resources.
@@ -108,7 +130,10 @@ type SubPool interface {
// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce.
- Pending(enforceTips bool) map[common.Address][]*LazyTransaction
+ //
+ // The transactions can also be pre-filtered by the dynamic fee components to
+ // reduce allocations and load on downstream subsystems.
+ Pending(filter PendingFilter) map[common.Address][]*LazyTransaction
// SubscribeTransactions subscribes to new transaction events. The subscriber
// can decide whether to receive notifications only for newly seen transactions
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index 46c90fe2fb..22adaf0c4f 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -79,7 +79,7 @@ type TxPool struct {
// New creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
-func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error) {
+func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) {
// Retrieve the current head so that all subpools and this main coordinator
// pool will have the same starting state, even if the chain moves forward
// during initialization.
@@ -122,7 +122,7 @@ func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver {
log.Error("pool attempted to reserve already-owned address", "address", addr)
return nil // Ignore fault to give the pool a chance to recover while the bug gets fixed
}
- return errors.New("address already reserved")
+ return ErrAlreadyReserved
}
p.reservations[addr] = subpool
if metrics.Enabled {
@@ -353,10 +353,13 @@ func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
// Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce.
-func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction {
+//
+// The transactions can also be pre-filtered by the dynamic fee components to
+// reduce allocations and load on downstream subsystems.
+func (p *TxPool) Pending(filter PendingFilter) map[common.Address][]*LazyTransaction {
txs := make(map[common.Address][]*LazyTransaction)
for _, subpool := range p.subpools {
- for addr, set := range subpool.Pending(enforceTips) {
+ for addr, set := range subpool.Pending(filter) {
txs[addr] = set
}
}
diff --git a/core/txpool/validation.go b/core/txpool/validation.go
index a9bd14020b..e13cbd3ea9 100644
--- a/core/txpool/validation.go
+++ b/core/txpool/validation.go
@@ -18,6 +18,7 @@ package txpool
import (
"crypto/sha256"
+ "errors"
"fmt"
"math/big"
@@ -30,6 +31,12 @@ import (
"github.com/ethereum/go-ethereum/params"
)
+var (
+ // blobTxMinBlobGasPrice is the big.Int version of the configured protocol
+ // parameter to avoid constucting a new big integer for every transaction.
+ blobTxMinBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice)
+)
+
// ValidationOptions define certain differences between transaction validation
// across the different pools without having to duplicate those checks.
type ValidationOptions struct {
@@ -101,15 +108,17 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
return err
}
if tx.Gas() < intrGas {
- return fmt.Errorf("%w: needed %v, allowed %v", core.ErrIntrinsicGas, intrGas, tx.Gas())
+ return fmt.Errorf("%w: gas %v, minimum needed %v", core.ErrIntrinsicGas, tx.Gas(), intrGas)
}
- // Ensure the gasprice is high enough to cover the requirement of the calling
- // pool and/or block producer
+ // Ensure the gasprice is high enough to cover the requirement of the calling pool
if tx.GasTipCapIntCmp(opts.MinTip) < 0 {
- return fmt.Errorf("%w: tip needed %v, tip permitted %v", ErrUnderpriced, opts.MinTip, tx.GasTipCap())
+ return fmt.Errorf("%w: gas tip cap %v, minimum needed %v", ErrUnderpriced, tx.GasTipCap(), opts.MinTip)
}
- // Ensure blob transactions have valid commitments
if tx.Type() == types.BlobTxType {
+ // Ensure the blob fee cap satisfies the minimum blob gas price
+ if tx.BlobGasFeeCapIntCmp(blobTxMinBlobGasPrice) < 0 {
+ return fmt.Errorf("%w: blob fee cap %v, minimum needed %v", ErrUnderpriced, tx.BlobGasFeeCap(), blobTxMinBlobGasPrice)
+ }
sidecar := tx.BlobTxSidecar()
if sidecar == nil {
return fmt.Errorf("missing sidecar in blob transaction")
@@ -118,11 +127,12 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
// data match up before doing any expensive validations
hashes := tx.BlobHashes()
if len(hashes) == 0 {
- return fmt.Errorf("blobless blob transaction")
+ return errors.New("blobless blob transaction")
}
if len(hashes) > params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob {
return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob)
}
+ // Ensure commitments, proofs and hashes are valid
if err := validateBlobSidecar(hashes, sidecar); err != nil {
return err
}
diff --git a/core/types/account.go b/core/types/account.go
new file mode 100644
index 0000000000..bb0f4ca02e
--- /dev/null
+++ b/core/types/account.go
@@ -0,0 +1,87 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package types
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/common/math"
+)
+
+//go:generate go run github.com/fjl/gencodec -type Account -field-override accountMarshaling -out gen_account.go
+
+// Account represents an Ethereum account and its attached data.
+// This type is used to specify accounts in the genesis block state, and
+// is also useful for JSON encoding/decoding of accounts.
+type Account struct {
+ Code []byte `json:"code,omitempty"`
+ Storage map[common.Hash]common.Hash `json:"storage,omitempty"`
+ Balance *big.Int `json:"balance" gencodec:"required"`
+ Nonce uint64 `json:"nonce,omitempty"`
+
+ // used in tests
+ PrivateKey []byte `json:"secretKey,omitempty"`
+}
+
+type accountMarshaling struct {
+ Code hexutil.Bytes
+ Balance *math.HexOrDecimal256
+ Nonce math.HexOrDecimal64
+ Storage map[storageJSON]storageJSON
+ PrivateKey hexutil.Bytes
+}
+
+// storageJSON represents a 256 bit byte array, but allows less than 256 bits when
+// unmarshaling from hex.
+type storageJSON common.Hash
+
+func (h *storageJSON) UnmarshalText(text []byte) error {
+ text = bytes.TrimPrefix(text, []byte("0x"))
+ if len(text) > 64 {
+ return fmt.Errorf("too many hex characters in storage key/value %q", text)
+ }
+ offset := len(h) - len(text)/2 // pad on the left
+ if _, err := hex.Decode(h[offset:], text); err != nil {
+ return fmt.Errorf("invalid hex storage key/value %q", text)
+ }
+ return nil
+}
+
+func (h storageJSON) MarshalText() ([]byte, error) {
+ return hexutil.Bytes(h[:]).MarshalText()
+}
+
+// GenesisAlloc specifies the initial state of a genesis block.
+type GenesisAlloc map[common.Address]Account
+
+func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
+ m := make(map[common.UnprefixedAddress]Account)
+ if err := json.Unmarshal(data, &m); err != nil {
+ return err
+ }
+ *ga = make(GenesisAlloc)
+ for addr, a := range m {
+ (*ga)[common.Address(addr)] = a
+ }
+ return nil
+}
diff --git a/core/types/bid.go b/core/types/bid.go
new file mode 100644
index 0000000000..6d7796acfc
--- /dev/null
+++ b/core/types/bid.go
@@ -0,0 +1,184 @@
+package types
+
+import (
+ "fmt"
+ "math/big"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/crypto"
+)
+
+const TxDecodeConcurrencyForPerBid = 5
+
+// BidArgs represents the arguments to submit a bid.
+type BidArgs struct {
+ // RawBid from builder directly
+ RawBid *RawBid
+ // Signature of the bid from builder
+ Signature hexutil.Bytes `json:"signature"`
+
+ // PayBidTx is a payment tx to builder from sentry, which is optional
+ PayBidTx hexutil.Bytes `json:"payBidTx"`
+ PayBidTxGasUsed uint64 `json:"payBidTxGasUsed"`
+}
+
+func (b *BidArgs) EcrecoverSender() (common.Address, error) {
+ pk, err := crypto.SigToPub(b.RawBid.Hash().Bytes(), b.Signature)
+ if err != nil {
+ return common.Address{}, err
+ }
+
+ return crypto.PubkeyToAddress(*pk), nil
+}
+
+func (b *BidArgs) ToBid(builder common.Address, signer Signer) (*Bid, error) {
+ txs, err := b.RawBid.DecodeTxs(signer)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(b.PayBidTx) != 0 {
+ var payBidTx = new(Transaction)
+ err = payBidTx.UnmarshalBinary(b.PayBidTx)
+ if err != nil {
+ return nil, err
+ }
+
+ txs = append(txs, payBidTx)
+ }
+
+ bid := &Bid{
+ Builder: builder,
+ BlockNumber: b.RawBid.BlockNumber,
+ ParentHash: b.RawBid.ParentHash,
+ Txs: txs,
+ GasUsed: b.RawBid.GasUsed + b.PayBidTxGasUsed,
+ GasFee: b.RawBid.GasFee,
+ BuilderFee: b.RawBid.BuilderFee,
+ rawBid: *b.RawBid,
+ }
+
+ if bid.BuilderFee == nil {
+ bid.BuilderFee = big.NewInt(0)
+ }
+
+ return bid, nil
+}
+
+// RawBid represents a raw bid from builder directly.
+type RawBid struct {
+ BlockNumber uint64 `json:"blockNumber"`
+ ParentHash common.Hash `json:"parentHash"`
+ Txs []hexutil.Bytes `json:"txs"`
+ GasUsed uint64 `json:"gasUsed"`
+ GasFee *big.Int `json:"gasFee"`
+ BuilderFee *big.Int `json:"builderFee"`
+
+ hash atomic.Value
+}
+
+func (b *RawBid) DecodeTxs(signer Signer) ([]*Transaction, error) {
+ if len(b.Txs) == 0 {
+ return []*Transaction{}, nil
+ }
+
+ txChan := make(chan int, len(b.Txs))
+ bidTxs := make([]*Transaction, len(b.Txs))
+ decode := func(txBytes hexutil.Bytes) (*Transaction, error) {
+ tx := new(Transaction)
+ err := tx.UnmarshalBinary(txBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = Sender(signer, tx)
+ if err != nil {
+ return nil, err
+ }
+
+ return tx, nil
+ }
+
+ errChan := make(chan error, TxDecodeConcurrencyForPerBid)
+ for i := 0; i < TxDecodeConcurrencyForPerBid; i++ {
+ go func() {
+ for {
+ txIndex, ok := <-txChan
+ if !ok {
+ errChan <- nil
+ return
+ }
+
+ txBytes := b.Txs[txIndex]
+ tx, err := decode(txBytes)
+ if err != nil {
+ errChan <- err
+ return
+ }
+
+ bidTxs[txIndex] = tx
+ }
+ }()
+ }
+
+ for i := 0; i < len(b.Txs); i++ {
+ txChan <- i
+ }
+
+ close(txChan)
+
+ for i := 0; i < TxDecodeConcurrencyForPerBid; i++ {
+ err := <-errChan
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode tx, %v", err)
+ }
+ }
+
+ return bidTxs, nil
+}
+
+// Hash returns the hash of the bid.
+func (b *RawBid) Hash() common.Hash {
+ if hash := b.hash.Load(); hash != nil {
+ return hash.(common.Hash)
+ }
+
+ h := rlpHash(b)
+ b.hash.Store(h)
+
+ return h
+}
+
+// Bid represents a bid.
+type Bid struct {
+ Builder common.Address
+ BlockNumber uint64
+ ParentHash common.Hash
+ Txs Transactions
+ GasUsed uint64
+ GasFee *big.Int
+ BuilderFee *big.Int
+
+ rawBid RawBid
+}
+
+// Hash returns the bid hash.
+func (b *Bid) Hash() common.Hash {
+ return b.rawBid.Hash()
+}
+
+// BidIssue represents a bid issue.
+type BidIssue struct {
+ Validator common.Address
+ Builder common.Address
+ BidHash common.Hash
+ Message string
+}
+
+type MevParams struct {
+ ValidatorCommission uint64 // 100 means 1%
+ BidSimulationLeftOver time.Duration
+}
diff --git a/core/types/bid_error.go b/core/types/bid_error.go
new file mode 100644
index 0000000000..6b543ae64f
--- /dev/null
+++ b/core/types/bid_error.go
@@ -0,0 +1,45 @@
+package types
+
+import "errors"
+
+const (
+ InvalidBidParamError = -38001
+ InvalidPayBidTxError = -38002
+ MevNotRunningError = -38003
+ MevBusyError = -38004
+ MevNotInTurnError = -38005
+)
+
+var (
+ ErrMevNotRunning = newBidError(errors.New("the validator stop accepting bids for now, try again later"), MevNotRunningError)
+ ErrMevBusy = newBidError(errors.New("the validator is working on too many bids, try again later"), MevBusyError)
+ ErrMevNotInTurn = newBidError(errors.New("the validator is not in-turn to propose currently, try again later"), MevNotInTurnError)
+)
+
+// bidError is an API error that encompasses an invalid bid with JSON error
+// code and a binary data blob.
+type bidError struct {
+ error
+ code int
+}
+
+// ErrorCode returns the JSON error code for an invalid bid.
+// See: https://github.com/ethereum/wiki/wiki/JSON-RPC-Error-Codes-Improvement-Proposal
+func (e *bidError) ErrorCode() int {
+ return e.code
+}
+
+func NewInvalidBidError(message string) *bidError {
+ return newBidError(errors.New(message), InvalidBidParamError)
+}
+
+func NewInvalidPayBidTxError(message string) *bidError {
+ return newBidError(errors.New(message), InvalidPayBidTxError)
+}
+
+func newBidError(err error, code int) *bidError {
+ return &bidError{
+ error: err,
+ code: code,
+ }
+}
diff --git a/core/gen_genesis_account.go b/core/types/gen_account.go
similarity index 61%
rename from core/gen_genesis_account.go
rename to core/types/gen_account.go
index a9d47e6ba3..4e475896a7 100644
--- a/core/gen_genesis_account.go
+++ b/core/types/gen_account.go
@@ -1,6 +1,6 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
-package core
+package types
import (
"encoding/json"
@@ -12,62 +12,62 @@ import (
"github.com/ethereum/go-ethereum/common/math"
)
-var _ = (*genesisAccountMarshaling)(nil)
+var _ = (*accountMarshaling)(nil)
// MarshalJSON marshals as JSON.
-func (g GenesisAccount) MarshalJSON() ([]byte, error) {
- type GenesisAccount struct {
+func (a Account) MarshalJSON() ([]byte, error) {
+ type Account struct {
Code hexutil.Bytes `json:"code,omitempty"`
Storage map[storageJSON]storageJSON `json:"storage,omitempty"`
Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"`
Nonce math.HexOrDecimal64 `json:"nonce,omitempty"`
PrivateKey hexutil.Bytes `json:"secretKey,omitempty"`
}
- var enc GenesisAccount
- enc.Code = g.Code
- if g.Storage != nil {
- enc.Storage = make(map[storageJSON]storageJSON, len(g.Storage))
- for k, v := range g.Storage {
+ var enc Account
+ enc.Code = a.Code
+ if a.Storage != nil {
+ enc.Storage = make(map[storageJSON]storageJSON, len(a.Storage))
+ for k, v := range a.Storage {
enc.Storage[storageJSON(k)] = storageJSON(v)
}
}
- enc.Balance = (*math.HexOrDecimal256)(g.Balance)
- enc.Nonce = math.HexOrDecimal64(g.Nonce)
- enc.PrivateKey = g.PrivateKey
+ enc.Balance = (*math.HexOrDecimal256)(a.Balance)
+ enc.Nonce = math.HexOrDecimal64(a.Nonce)
+ enc.PrivateKey = a.PrivateKey
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
-func (g *GenesisAccount) UnmarshalJSON(input []byte) error {
- type GenesisAccount struct {
+func (a *Account) UnmarshalJSON(input []byte) error {
+ type Account struct {
Code *hexutil.Bytes `json:"code,omitempty"`
Storage map[storageJSON]storageJSON `json:"storage,omitempty"`
Balance *math.HexOrDecimal256 `json:"balance" gencodec:"required"`
Nonce *math.HexOrDecimal64 `json:"nonce,omitempty"`
PrivateKey *hexutil.Bytes `json:"secretKey,omitempty"`
}
- var dec GenesisAccount
+ var dec Account
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
if dec.Code != nil {
- g.Code = *dec.Code
+ a.Code = *dec.Code
}
if dec.Storage != nil {
- g.Storage = make(map[common.Hash]common.Hash, len(dec.Storage))
+ a.Storage = make(map[common.Hash]common.Hash, len(dec.Storage))
for k, v := range dec.Storage {
- g.Storage[common.Hash(k)] = common.Hash(v)
+ a.Storage[common.Hash(k)] = common.Hash(v)
}
}
if dec.Balance == nil {
- return errors.New("missing required field 'balance' for GenesisAccount")
+ return errors.New("missing required field 'balance' for Account")
}
- g.Balance = (*big.Int)(dec.Balance)
+ a.Balance = (*big.Int)(dec.Balance)
if dec.Nonce != nil {
- g.Nonce = uint64(*dec.Nonce)
+ a.Nonce = uint64(*dec.Nonce)
}
if dec.PrivateKey != nil {
- g.PrivateKey = *dec.PrivateKey
+ a.PrivateKey = *dec.PrivateKey
}
return nil
}
diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go
index d2a98ed7bf..a6949414f3 100644
--- a/core/types/hashing_test.go
+++ b/core/types/hashing_test.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
func TestDeriveSha(t *testing.T) {
@@ -39,7 +40,7 @@ func TestDeriveSha(t *testing.T) {
t.Fatal(err)
}
for len(txs) < 1000 {
- exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
+ exp := types.DeriveSha(txs, trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
got := types.DeriveSha(txs, trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp)
@@ -86,7 +87,7 @@ func BenchmarkDeriveSha200(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
+ exp = types.DeriveSha(txs, trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
}
})
@@ -107,7 +108,7 @@ func TestFuzzDeriveSha(t *testing.T) {
rndSeed := mrand.Int()
for i := 0; i < 10; i++ {
seed := rndSeed + i
- exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
+ exp := types.DeriveSha(newDummy(i), trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
printList(newDummy(seed))
@@ -135,7 +136,7 @@ func TestDerivableList(t *testing.T) {
},
}
for i, tc := range tcs[1:] {
- exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
+ exp := types.DeriveSha(flatList(tc), trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)))
got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil))
if !bytes.Equal(got[:], exp[:]) {
t.Fatalf("case %d: got %x exp %x", i, got, exp)
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 9a844158c0..b91f8b10b8 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -19,6 +19,7 @@ package types
import (
"bytes"
"errors"
+ "fmt"
"io"
"math/big"
"sync/atomic"
@@ -320,6 +321,7 @@ func (tx *Transaction) Cost() *big.Int {
// RawSignatureValues returns the V, R, S signature values of the transaction.
// The return values should not be modified by the caller.
+// The return values may be nil or zero, if the transaction is unsigned.
func (tx *Transaction) RawSignatureValues() (v, r, s *big.Int) {
return tx.inner.rawSignatureValues()
}
@@ -518,6 +520,9 @@ func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, e
if err != nil {
return nil, err
}
+ if r == nil || s == nil || v == nil {
+ return nil, fmt.Errorf("%w: r: %s, s: %s, v: %s", ErrInvalidSig, r, s, v)
+ }
cpy := tx.inner.copy()
cpy.setSignatureValues(signer.ChainID(), v, r, s)
return &Transaction{inner: cpy, time: tx.time}, nil
diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go
index 08ce80b07c..4d5b2bcdd4 100644
--- a/core/types/transaction_marshalling.go
+++ b/core/types/transaction_marshalling.go
@@ -23,6 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/holiman/uint256"
)
@@ -47,6 +48,11 @@ type txJSON struct {
S *hexutil.Big `json:"s"`
YParity *hexutil.Uint64 `json:"yParity,omitempty"`
+ // Blob transaction sidecar encoding:
+ Blobs []kzg4844.Blob `json:"blobs,omitempty"`
+ Commitments []kzg4844.Commitment `json:"commitments,omitempty"`
+ Proofs []kzg4844.Proof `json:"proofs,omitempty"`
+
// Only used for encoding:
Hash common.Hash `json:"hash"`
}
@@ -142,6 +148,11 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.S = (*hexutil.Big)(itx.S.ToBig())
yparity := itx.V.Uint64()
enc.YParity = (*hexutil.Uint64)(&yparity)
+ if sidecar := itx.Sidecar; sidecar != nil {
+ enc.Blobs = itx.Sidecar.Blobs
+ enc.Commitments = itx.Sidecar.Commitments
+ enc.Proofs = itx.Sidecar.Proofs
+ }
}
return json.Marshal(&enc)
}
diff --git a/core/types/transaction_signing_test.go b/core/types/transaction_signing_test.go
index 2a9ceb0952..b66577f7ed 100644
--- a/core/types/transaction_signing_test.go
+++ b/core/types/transaction_signing_test.go
@@ -18,11 +18,13 @@ package types
import (
"errors"
+ "fmt"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -41,7 +43,7 @@ func TestEIP155Signing(t *testing.T) {
t.Fatal(err)
}
if from != addr {
- t.Errorf("exected from and address to be equal. Got %x want %x", from, addr)
+ t.Errorf("expected from and address to be equal. Got %x want %x", from, addr)
}
}
@@ -136,3 +138,53 @@ func TestChainId(t *testing.T) {
t.Error("expected no error")
}
}
+
+type nilSigner struct {
+ v, r, s *big.Int
+ Signer
+}
+
+func (ns *nilSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) {
+ return ns.v, ns.r, ns.s, nil
+}
+
+// TestNilSigner ensures a faulty Signer implementation does not result in nil signature values or panics.
+func TestNilSigner(t *testing.T) {
+ key, _ := crypto.GenerateKey()
+ innerSigner := LatestSignerForChainID(big.NewInt(1))
+ for i, signer := range []Signer{
+ &nilSigner{v: nil, r: nil, s: nil, Signer: innerSigner},
+ &nilSigner{v: big.NewInt(1), r: big.NewInt(1), s: nil, Signer: innerSigner},
+ &nilSigner{v: big.NewInt(1), r: nil, s: big.NewInt(1), Signer: innerSigner},
+ &nilSigner{v: nil, r: big.NewInt(1), s: big.NewInt(1), Signer: innerSigner},
+ } {
+ t.Run(fmt.Sprintf("signer_%d", i), func(t *testing.T) {
+ t.Run("legacy", func(t *testing.T) {
+ legacyTx := createTestLegacyTxInner()
+ _, err := SignNewTx(key, signer, legacyTx)
+ if !errors.Is(err, ErrInvalidSig) {
+ t.Fatal("expected signature values error, no nil result or panic")
+ }
+ })
+ // test Blob tx specifically, since the signature value types changed
+ t.Run("blobtx", func(t *testing.T) {
+ blobtx := createEmptyBlobTxInner(false)
+ _, err := SignNewTx(key, signer, blobtx)
+ if !errors.Is(err, ErrInvalidSig) {
+ t.Fatal("expected signature values error, no nil result or panic")
+ }
+ })
+ })
+ }
+}
+
+func createTestLegacyTxInner() *LegacyTx {
+ return &LegacyTx{
+ Nonce: uint64(0),
+ To: nil,
+ Value: big.NewInt(0),
+ Gas: params.TxGas,
+ GasPrice: big.NewInt(params.GWei),
+ Data: nil,
+ }
+}
diff --git a/core/types/tx_blob.go b/core/types/tx_blob.go
index caede7cc53..25a85695ef 100644
--- a/core/types/tx_blob.go
+++ b/core/types/tx_blob.go
@@ -43,7 +43,7 @@ type BlobTx struct {
BlobHashes []common.Hash
// A blob transaction can optionally contain blobs. This field must be set when BlobTx
- // is used to create a transaction for sigining.
+ // is used to create a transaction for signing.
Sidecar *BlobTxSidecar `rlp:"-"`
// Signature values
diff --git a/core/types/tx_blob_test.go b/core/types/tx_blob_test.go
index 44ac48cc6f..25d09e31ce 100644
--- a/core/types/tx_blob_test.go
+++ b/core/types/tx_blob_test.go
@@ -65,6 +65,12 @@ var (
)
func createEmptyBlobTx(key *ecdsa.PrivateKey, withSidecar bool) *Transaction {
+ blobtx := createEmptyBlobTxInner(withSidecar)
+ signer := NewCancunSigner(blobtx.ChainID.ToBig())
+ return MustSignNewTx(key, signer, blobtx)
+}
+
+func createEmptyBlobTxInner(withSidecar bool) *BlobTx {
sidecar := &BlobTxSidecar{
Blobs: []kzg4844.Blob{emptyBlob},
Commitments: []kzg4844.Commitment{emptyBlobCommit},
@@ -85,6 +91,5 @@ func createEmptyBlobTx(key *ecdsa.PrivateKey, withSidecar bool) *Transaction {
if withSidecar {
blobtx.Sidecar = sidecar
}
- signer := NewCancunSigner(blobtx.ChainID.ToBig())
- return MustSignNewTx(key, signer, blobtx)
+ return blobtx
}
diff --git a/core/vm/contracts_lightclient.go b/core/vm/contracts_lightclient.go
index 286250b1d8..2ec20a3a5e 100644
--- a/core/vm/contracts_lightclient.go
+++ b/core/vm/contracts_lightclient.go
@@ -2,6 +2,7 @@ package vm
import (
"encoding/binary"
+ "errors"
"fmt"
"net/url"
"strings"
@@ -68,7 +69,7 @@ func (c *tmHeaderValidate) Run(input []byte) (result []byte, err error) {
}()
if uint64(len(input)) <= precompileContractInputMetaDataLength {
- return nil, fmt.Errorf("invalid input")
+ return nil, errors.New("invalid input")
}
payloadLength := binary.BigEndian.Uint64(input[precompileContractInputMetaDataLength-uint64TypeLength : precompileContractInputMetaDataLength])
@@ -124,7 +125,7 @@ func (c *iavlMerkleProofValidate) Run(input []byte) (result []byte, err error) {
return c.basicIavlMerkleProofValidate.Run(input)
}
-// tmHeaderValidate implemented as a native contract.
+// tmHeaderValidateNano implemented as a native contract.
type tmHeaderValidateNano struct{}
func (c *tmHeaderValidateNano) RequiredGas(input []byte) uint64 {
@@ -132,7 +133,7 @@ func (c *tmHeaderValidateNano) RequiredGas(input []byte) uint64 {
}
func (c *tmHeaderValidateNano) Run(input []byte) (result []byte, err error) {
- return nil, fmt.Errorf("suspend")
+ return nil, errors.New("suspend")
}
type iavlMerkleProofValidateNano struct{}
@@ -142,7 +143,7 @@ func (c *iavlMerkleProofValidateNano) RequiredGas(_ []byte) uint64 {
}
func (c *iavlMerkleProofValidateNano) Run(_ []byte) (result []byte, err error) {
- return nil, fmt.Errorf("suspend")
+ return nil, errors.New("suspend")
}
// ------------------------------------------------------------------------------------------------------------------------------------------------
@@ -250,7 +251,7 @@ func (c *basicIavlMerkleProofValidate) Run(input []byte) (result []byte, err err
valid := kvmp.Validate()
if !valid {
- return nil, fmt.Errorf("invalid merkle proof")
+ return nil, errors.New("invalid merkle proof")
}
return successfulMerkleResult(), nil
@@ -418,7 +419,7 @@ const (
// | 33 bytes | 64 bytes | 32 bytes |
func (c *secp256k1SignatureRecover) Run(input []byte) (result []byte, err error) {
if len(input) != int(secp256k1PubKeyLength)+int(secp256k1SignatureLength)+int(secp256k1SignatureMsgHashLength) {
- return nil, fmt.Errorf("invalid input")
+ return nil, errors.New("invalid input")
}
return c.runTMSecp256k1Signature(
@@ -432,7 +433,7 @@ func (c *secp256k1SignatureRecover) runTMSecp256k1Signature(pubkey, signatureStr
tmPubKey := secp256k1.PubKeySecp256k1(pubkey)
ok := tmPubKey.VerifyBytesWithMsgHash(msgHash, signatureStr)
if !ok {
- return nil, fmt.Errorf("invalid signature")
+ return nil, errors.New("invalid signature")
}
return tmPubKey.Address().Bytes(), nil
}
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index f9b48e132b..0bea341bc1 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -224,7 +224,7 @@ func BenchmarkPrecompiledRipeMD(bench *testing.B) {
benchmarkPrecompiled("03", t, bench)
}
-// Benchmarks the sample inputs from the identiy precompile.
+// Benchmarks the sample inputs from the identity precompile.
func BenchmarkPrecompiledIdentity(bench *testing.B) {
t := precompiledTest{
Input: "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 30c9eb0f78..a24703342d 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -209,7 +209,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
return nil, gas, ErrDepth
}
// Fail if we're trying to transfer more than the available balance
- if value.Sign() != 0 && !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
+ if !value.IsZero() && !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
return nil, gas, ErrInsufficientBalance
}
snapshot := evm.StateDB.Snapshot()
@@ -217,7 +217,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
debug := evm.Config.Tracer != nil
if !evm.StateDB.Exist(addr) {
- if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 {
+ if !isPrecompile && evm.chainRules.IsEIP158 && value.IsZero() {
// Calling a non existing account, don't do anything, but ping the tracer
if debug {
if evm.depth == 0 {
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index ff78833ed9..b8055de6bc 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -17,6 +17,8 @@
package vm
import (
+ "math"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@@ -347,9 +349,7 @@ func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
}
func opCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
- l := new(uint256.Int)
- l.SetUint64(uint64(len(scope.Contract.Code)))
- scope.Stack.push(l)
+ scope.Stack.push(new(uint256.Int).SetUint64(uint64(len(scope.Contract.Code))))
return nil, nil
}
@@ -361,7 +361,7 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
)
uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
if overflow {
- uint64CodeOffset = 0xffffffffffffffff
+ uint64CodeOffset = math.MaxUint64
}
codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64())
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
@@ -379,7 +379,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
)
uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
if overflow {
- uint64CodeOffset = 0xffffffffffffffff
+ uint64CodeOffset = math.MaxUint64
}
addr := common.Address(a.Bytes20())
codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64())
diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go
index bcb9edb5a8..43a565c21e 100644
--- a/core/vm/interpreter.go
+++ b/core/vm/interpreter.go
@@ -163,7 +163,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
debug = in.evm.Config.Tracer != nil
)
// Don't move this deferred function, it's placed before the capturestate-deferred method,
- // so that it get's executed _after_: the capturestate needs the stacks before
+ // so that it gets executed _after_: the capturestate needs the stacks before
// they are returned to the pools
defer func() {
returnStack(stack)
diff --git a/core/vm/jump_table_test.go b/core/vm/jump_table_test.go
index f67915fff3..02558035c0 100644
--- a/core/vm/jump_table_test.go
+++ b/core/vm/jump_table_test.go
@@ -22,7 +22,7 @@ import (
"github.com/stretchr/testify/require"
)
-// TestJumpTableCopy tests that deep copy is necessery to prevent modify shared jump table
+// TestJumpTableCopy tests that deep copy is necessary to prevent modify shared jump table
func TestJumpTableCopy(t *testing.T) {
tbl := newMergeInstructionSet()
require.Equal(t, uint64(0), tbl[SLOAD].constantGas)
diff --git a/core/vm/lightclient/v1/ics23_proof.go b/core/vm/lightclient/v1/ics23_proof.go
index cd4f340fbe..3d9e6990e3 100644
--- a/core/vm/lightclient/v1/ics23_proof.go
+++ b/core/vm/lightclient/v1/ics23_proof.go
@@ -1,6 +1,7 @@
package v1
import (
+ "errors"
"fmt"
"github.com/bnb-chain/ics23"
@@ -71,7 +72,7 @@ func (op CommitmentOp) GetKey() []byte {
// in the CommitmentOp and return the CommitmentRoot of the proof.
func (op CommitmentOp) Run(args [][]byte) ([][]byte, error) {
if _, ok := op.Proof.Proof.(*ics23.CommitmentProof_Exist); !ok {
- return nil, fmt.Errorf("only exist proof supported")
+ return nil, errors.New("only exist proof supported")
}
// calculate root from proof
diff --git a/core/vm/lightclient/v1/types.go b/core/vm/lightclient/v1/types.go
index 54b2b913bf..4d1442c27c 100644
--- a/core/vm/lightclient/v1/types.go
+++ b/core/vm/lightclient/v1/types.go
@@ -3,6 +3,7 @@ package v1
import (
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"github.com/tendermint/tendermint/crypto/ed25519"
@@ -97,7 +98,7 @@ func (cs ConsensusState) EncodeConsensusState() ([]byte, error) {
pos := uint64(0)
if uint64(len(cs.ChainID)) > chainIDLength {
- return nil, fmt.Errorf("chainID length should be no more than 32")
+ return nil, errors.New("chainID length should be no more than 32")
}
copy(encodingBytes[pos:pos+chainIDLength], cs.ChainID)
pos += chainIDLength
@@ -115,7 +116,7 @@ func (cs ConsensusState) EncodeConsensusState() ([]byte, error) {
validator := cs.NextValidatorSet.Validators[index]
pubkey, ok := validator.PubKey.(ed25519.PubKeyEd25519)
if !ok {
- return nil, fmt.Errorf("invalid pubkey type")
+ return nil, errors.New("invalid pubkey type")
}
copy(encodingBytes[pos:pos+validatorPubkeyLength], pubkey[:])
@@ -177,16 +178,16 @@ func (h *Header) Validate(chainID string) error {
return err
}
if h.ValidatorSet == nil {
- return fmt.Errorf("invalid header: validator set is nil")
+ return errors.New("invalid header: validator set is nil")
}
if h.NextValidatorSet == nil {
- return fmt.Errorf("invalid header: next validator set is nil")
+ return errors.New("invalid header: next validator set is nil")
}
if !bytes.Equal(h.ValidatorsHash, h.ValidatorSet.Hash()) {
- return fmt.Errorf("invalid header: validator set does not match hash")
+ return errors.New("invalid header: validator set does not match hash")
}
if !bytes.Equal(h.NextValidatorsHash, h.NextValidatorSet.Hash()) {
- return fmt.Errorf("invalid header: next validator set does not match hash")
+ return errors.New("invalid header: next validator set does not match hash")
}
return nil
}
diff --git a/core/vm/lightclient/v2/lightclient.go b/core/vm/lightclient/v2/lightclient.go
index 64e21f64be..acdb95e15c 100644
--- a/core/vm/lightclient/v2/lightclient.go
+++ b/core/vm/lightclient/v2/lightclient.go
@@ -4,6 +4,7 @@ package v2
import (
"bytes"
"encoding/binary"
+ "errors"
"fmt"
"github.com/cometbft/cometbft/crypto/ed25519"
@@ -49,7 +50,7 @@ func (cs ConsensusState) EncodeConsensusState() ([]byte, error) {
pos := uint64(0)
if uint64(len(cs.ChainID)) > chainIDLength {
- return nil, fmt.Errorf("chainID length should be no more than 32")
+ return nil, errors.New("chainID length should be no more than 32")
}
copy(encodingBytes[pos:pos+chainIDLength], cs.ChainID)
pos += chainIDLength
@@ -197,7 +198,7 @@ func DecodeConsensusState(input []byte) (ConsensusState, error) {
// 32 bytes | | |
func DecodeLightBlockValidationInput(input []byte) (*ConsensusState, *types.LightBlock, error) {
if uint64(len(input)) <= consensusStateLengthBytesLength {
- return nil, nil, fmt.Errorf("invalid input")
+ return nil, nil, errors.New("invalid input")
}
csLen := binary.BigEndian.Uint64(input[consensusStateLengthBytesLength-uint64TypeLength : consensusStateLengthBytesLength])
diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go
index bca6d1e83b..f420a24105 100644
--- a/core/vm/operations_acl.go
+++ b/core/vm/operations_acl.go
@@ -187,7 +187,12 @@ func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc {
// outside of this function, as part of the dynamic gas, and that will make it
// also become correctly reported to tracers.
contract.Gas += coldCost
- return gas + coldCost, nil
+
+ var overflow bool
+ if gas, overflow = math.SafeAdd(gas, coldCost); overflow {
+ return 0, ErrGasUintOverflow
+ }
+ return gas, nil
}
}
diff --git a/core/vote/vote_pool_test.go b/core/vote/vote_pool_test.go
index 0025db9900..ed905e0f3a 100644
--- a/core/vote/vote_pool_test.go
+++ b/core/vote/vote_pool_test.go
@@ -20,6 +20,7 @@ import (
"container/heap"
"context"
"encoding/json"
+ "errors"
"fmt"
"math/big"
"os"
@@ -81,13 +82,13 @@ func (b *testBackend) EventMux() *event.TypeMux { return b.eventMux }
func (p *mockPOSA) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, headers []*types.Header) (uint64, common.Hash, error) {
parentHeader := chain.GetHeaderByHash(headers[len(headers)-1].ParentHash)
if parentHeader == nil {
- return 0, common.Hash{}, fmt.Errorf("unexpected error")
+ return 0, common.Hash{}, errors.New("unexpected error")
}
return parentHeader.Number.Uint64(), parentHeader.Hash(), nil
}
func (p *mockInvalidPOSA) GetJustifiedNumberAndHash(chain consensus.ChainHeaderReader, headers []*types.Header) (uint64, common.Hash, error) {
- return 0, common.Hash{}, fmt.Errorf("not supported")
+ return 0, common.Hash{}, errors.New("not supported")
}
func (m *mockPOSA) VerifyVote(chain consensus.ChainHeaderReader, vote *types.VoteEnvelope) error {
@@ -145,7 +146,7 @@ func testVotePool(t *testing.T, isValidRules bool) {
genesis := &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
+ Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
}
mux := new(event.TypeMux)
diff --git a/core/vote/vote_signer.go b/core/vote/vote_signer.go
index 8153f40ca9..ac17518272 100644
--- a/core/vote/vote_signer.go
+++ b/core/vote/vote_signer.go
@@ -2,7 +2,6 @@ package vote
import (
"context"
- "fmt"
"os"
"time"
@@ -38,7 +37,7 @@ func NewVoteSigner(blsPasswordPath, blsWalletPath string) (*VoteSigner, error) {
}
if !dirExists {
log.Error("BLS wallet did not exists.")
- return nil, fmt.Errorf("BLS wallet did not exists")
+ return nil, errors.New("BLS wallet did not exists")
}
walletPassword, err := os.ReadFile(blsPasswordPath)
diff --git a/crypto/bls12381/g2.go b/crypto/bls12381/g2.go
index e5fe75af20..b942bf94fd 100644
--- a/crypto/bls12381/g2.go
+++ b/crypto/bls12381/g2.go
@@ -27,7 +27,7 @@ import (
// If z is equal to one the point is considered as in affine form.
type PointG2 [3]fe2
-// Set copies valeus of one point to another.
+// Set copies values of one point to another.
func (p *PointG2) Set(p2 *PointG2) *PointG2 {
p[0].set(&p2[0])
p[1].set(&p2[1])
diff --git a/crypto/bn256/google/bn256.go b/crypto/bn256/google/bn256.go
index 0a9d5cd35d..93953e23a9 100644
--- a/crypto/bn256/google/bn256.go
+++ b/crypto/bn256/google/bn256.go
@@ -166,7 +166,7 @@ type G2 struct {
p *twistPoint
}
-// RandomG1 returns x and g₂ˣ where x is a random, non-zero number read from r.
+// RandomG2 returns x and g₂ˣ where x is a random, non-zero number read from r.
func RandomG2(r io.Reader) (*big.Int, *G2, error) {
var k *big.Int
var err error
diff --git a/crypto/kzg4844/kzg4844.go b/crypto/kzg4844/kzg4844.go
index 4561ef9de9..52124df674 100644
--- a/crypto/kzg4844/kzg4844.go
+++ b/crypto/kzg4844/kzg4844.go
@@ -21,21 +21,60 @@ import (
"embed"
"errors"
"hash"
+ "reflect"
"sync/atomic"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
)
//go:embed trusted_setup.json
var content embed.FS
+var (
+ blobT = reflect.TypeOf(Blob{})
+ commitmentT = reflect.TypeOf(Commitment{})
+ proofT = reflect.TypeOf(Proof{})
+)
+
// Blob represents a 4844 data blob.
type Blob [131072]byte
+// UnmarshalJSON parses a blob in hex syntax.
+func (b *Blob) UnmarshalJSON(input []byte) error {
+ return hexutil.UnmarshalFixedJSON(blobT, input, b[:])
+}
+
+// MarshalText returns the hex representation of b.
+func (b Blob) MarshalText() ([]byte, error) {
+ return hexutil.Bytes(b[:]).MarshalText()
+}
+
// Commitment is a serialized commitment to a polynomial.
type Commitment [48]byte
+// UnmarshalJSON parses a commitment in hex syntax.
+func (c *Commitment) UnmarshalJSON(input []byte) error {
+ return hexutil.UnmarshalFixedJSON(commitmentT, input, c[:])
+}
+
+// MarshalText returns the hex representation of c.
+func (c Commitment) MarshalText() ([]byte, error) {
+ return hexutil.Bytes(c[:]).MarshalText()
+}
+
// Proof is a serialized commitment to the quotient polynomial.
type Proof [48]byte
+// UnmarshalJSON parses a proof in hex syntax.
+func (p *Proof) UnmarshalJSON(input []byte) error {
+ return hexutil.UnmarshalFixedJSON(proofT, input, p[:])
+}
+
+// MarshalText returns the hex representation of p.
+func (p Proof) MarshalText() ([]byte, error) {
+ return hexutil.Bytes(p[:]).MarshalText()
+}
+
// Point is a BLS field element.
type Point [32]byte
diff --git a/eth/api_admin.go b/eth/api_admin.go
index 4a3ccb84e8..76a0d087bb 100644
--- a/eth/api_admin.go
+++ b/eth/api_admin.go
@@ -24,6 +24,7 @@ import (
"os"
"strings"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
@@ -141,3 +142,31 @@ func (api *AdminAPI) ImportChain(file string) (bool, error) {
}
return true, nil
}
+
+// MevRunning returns true if the validator accept bids from builder
+func (api *AdminAPI) MevRunning() bool {
+ return api.eth.APIBackend.MevRunning()
+}
+
+// StartMev starts mev. It notifies the miner to start to receive bids.
+func (api *AdminAPI) StartMev() {
+ api.eth.APIBackend.StartMev()
+}
+
+// StopMev stops mev. It notifies the miner to stop receiving bids from this moment,
+// but the bids before this moment would still been taken into consideration by mev.
+func (api *AdminAPI) StopMev() {
+ api.eth.APIBackend.StopMev()
+}
+
+// AddBuilder adds a builder to the bid simulator.
+// url is the endpoint of the builder, for example, "https://mev-builder.amazonaws.com",
+// if validator is equipped with sentry, ignore the url.
+func (api *AdminAPI) AddBuilder(builder common.Address, url string) error {
+ return api.eth.APIBackend.AddBuilder(builder, url)
+}
+
+// RemoveBuilder removes a builder from the bid simulator.
+func (api *AdminAPI) RemoveBuilder(builder common.Address) error {
+ return api.eth.APIBackend.RemoveBuilder(builder)
+}
diff --git a/eth/api_backend.go b/eth/api_backend.go
index bf66f078aa..bcf046679a 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -297,7 +297,7 @@ func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction)
}
func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
- pending := b.eth.txPool.Pending(false)
+ pending := b.eth.txPool.Pending(txpool.PendingFilter{})
var txs types.Transactions
for _, batch := range pending {
for _, lazy := range batch {
@@ -456,3 +456,39 @@ func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, re
func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) {
return b.eth.stateAtTransaction(ctx, block, txIndex, reexec)
}
+
+func (b *EthAPIBackend) MevRunning() bool {
+ return b.Miner().MevRunning()
+}
+
+func (b *EthAPIBackend) MevParams() *types.MevParams {
+ return b.Miner().MevParams()
+}
+
+func (b *EthAPIBackend) StartMev() {
+ b.Miner().StartMev()
+}
+
+func (b *EthAPIBackend) StopMev() {
+ b.Miner().StopMev()
+}
+
+func (b *EthAPIBackend) AddBuilder(builder common.Address, url string) error {
+ return b.Miner().AddBuilder(builder, url)
+}
+
+func (b *EthAPIBackend) RemoveBuilder(builder common.Address) error {
+ return b.Miner().RemoveBuilder(builder)
+}
+
+func (b *EthAPIBackend) SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error) {
+ return b.Miner().SendBid(ctx, bid)
+}
+
+func (b *EthAPIBackend) BestBidGasFee(parentHash common.Hash) *big.Int {
+ return b.Miner().BestPackedBlockReward(parentHash)
+}
+
+func (b *EthAPIBackend) MinerInTurn() bool {
+ return b.Miner().InTurn()
+}
diff --git a/eth/api_debug_test.go b/eth/api_debug_test.go
index d3296e745b..238d0e4ca8 100644
--- a/eth/api_debug_test.go
+++ b/eth/api_debug_test.go
@@ -29,7 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
"github.com/holiman/uint256"
"golang.org/x/exp/slices"
)
@@ -63,7 +63,7 @@ func TestAccountRange(t *testing.T) {
t.Parallel()
var (
- statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true})
+ statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &triedb.Config{Preimages: true})
sdb, _ = state.New(types.EmptyRootHash, statedb, nil)
addrs = [AccountRangeMaxResults * 2]common.Address{}
m = map[common.Address]bool{}
@@ -163,7 +163,7 @@ func TestStorageRangeAt(t *testing.T) {
// Create a state where account 0x010000... has a few storage entries.
var (
- db = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: true})
+ db = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &triedb.Config{Preimages: true})
sdb, _ = state.New(types.EmptyRootHash, db, nil)
addr = common.Address{0x01}
keys = []common.Hash{ // hashes of Keys of storage
diff --git a/eth/api_miner.go b/eth/api_miner.go
index 477531d494..764d0ae5e2 100644
--- a/eth/api_miner.go
+++ b/eth/api_miner.go
@@ -29,7 +29,7 @@ type MinerAPI struct {
e *Ethereum
}
-// NewMinerAPI create a new MinerAPI instance.
+// NewMinerAPI creates a new MinerAPI instance.
func NewMinerAPI(e *Ethereum) *MinerAPI {
return &MinerAPI{e}
}
@@ -64,6 +64,7 @@ func (api *MinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
api.e.lock.Unlock()
api.e.txPool.SetGasTip((*big.Int)(&gasPrice))
+ api.e.Miner().SetGasTip((*big.Int)(&gasPrice))
return true
}
diff --git a/eth/backend.go b/eth/backend.go
index d60e24605c..487826c0ea 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -64,7 +64,11 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
+)
+
+const (
+ ChainDBNamespace = "eth/db/chaindata/"
)
// Config contains the configuration options of the ETH protocol.
@@ -134,7 +138,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// Assemble the Ethereum object
chainDb, err := stack.OpenAndMergeDatabase("chaindata", config.DatabaseCache, config.DatabaseHandles,
- config.DatabaseFreezer, config.DatabaseDiff, "eth/db/chaindata/", false, config.PersistDiff, config.PruneAncientData)
+ config.DatabaseFreezer, config.DatabaseDiff, ChainDBNamespace, false, config.PersistDiff, config.PruneAncientData)
if err != nil {
return nil, err
}
@@ -290,7 +294,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
}
legacyPool := legacypool.New(config.TxPool, eth.blockchain)
- eth.txPool, err = txpool.New(new(big.Int).SetUint64(config.TxPool.PriceLimit), eth.blockchain, []txpool.SubPool{legacyPool, blobPool})
+ eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool})
if err != nil {
return nil, err
}
@@ -327,7 +331,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
parlia.VotePool = votePool
}
} else {
- return nil, fmt.Errorf("Engine is not Parlia type")
+ return nil, errors.New("Engine is not Parlia type")
}
log.Info("Create votePool successfully")
eth.handler.votepool = votePool
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 8e0f81ee39..1cd8c51b25 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -30,9 +30,11 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
+ "github.com/ethereum/go-ethereum/internal/version"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/params/forks"
"github.com/ethereum/go-ethereum/rpc"
)
@@ -88,6 +90,7 @@ var caps = []string{
"engine_newPayloadV3",
"engine_getPayloadBodiesByHashV1",
"engine_getPayloadBodiesByRangeV1",
+ "engine_getClientVersionV1",
}
type ConsensusAPI struct {
@@ -173,8 +176,8 @@ func newConsensusAPIWithoutHeartbeat(eth *eth.Ethereum) *ConsensusAPI {
// and return its payloadID.
func (api *ConsensusAPI) ForkchoiceUpdatedV1(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
if payloadAttributes != nil {
- if payloadAttributes.Withdrawals != nil {
- return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
+ if payloadAttributes.Withdrawals != nil || payloadAttributes.BeaconRoot != nil {
+ return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("withdrawals and beacon root not supported in V1"))
}
if api.eth.BlockChain().Config().IsShanghai(api.eth.BlockChain().Config().LondonBlock, payloadAttributes.Timestamp) {
return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("forkChoiceUpdateV1 called post-shanghai"))
@@ -183,23 +186,31 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV1(update engine.ForkchoiceStateV1, pa
return api.forkchoiceUpdated(update, payloadAttributes, engine.PayloadV1, false)
}
-// ForkchoiceUpdatedV2 is equivalent to V1 with the addition of withdrawals in the payload attributes.
+// ForkchoiceUpdatedV2 is equivalent to V1 with the addition of withdrawals in the payload
+// attributes. It supports both PayloadAttributesV1 and PayloadAttributesV2.
func (api *ConsensusAPI) ForkchoiceUpdatedV2(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
if params != nil {
- if params.Withdrawals == nil {
- return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing withdrawals"))
+ switch api.eth.BlockChain().Config().LatestFork(params.Timestamp) {
+ case forks.Paris:
+ if params.Withdrawals != nil {
+ return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("withdrawals before shanghai"))
+ }
+ case forks.Shanghai:
+ if params.Withdrawals == nil {
+ return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("missing withdrawals"))
+ }
+ default:
+ return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV2 must only be called with paris and shanghai payloads"))
}
if params.BeaconRoot != nil {
return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("unexpected beacon root"))
}
- if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Shanghai {
- return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV2 must only be called for shanghai payloads"))
- }
}
return api.forkchoiceUpdated(update, params, engine.PayloadV2, false)
}
-// ForkchoiceUpdatedV3 is equivalent to V2 with the addition of parent beacon block root in the payload attributes.
+// ForkchoiceUpdatedV3 is equivalent to V2 with the addition of parent beacon block root
+// in the payload attributes. It supports only PayloadAttributesV3.
func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
if params != nil {
// TODO(matt): according to https://github.com/ethereum/execution-apis/pull/498,
@@ -477,7 +488,7 @@ func (api *ConsensusAPI) NewPayloadV1(params engine.ExecutableData) (engine.Payl
// NewPayloadV2 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
func (api *ConsensusAPI) NewPayloadV2(params engine.ExecutableData) (engine.PayloadStatusV1, error) {
if api.eth.BlockChain().Config().IsCancun(api.eth.BlockChain().Config().LondonBlock, params.Timestamp) {
- return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("can't use new payload v2 post-shanghai"))
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("can't use newPayloadV2 post-cancun"))
}
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) == forks.Shanghai {
if params.Withdrawals == nil {
@@ -492,7 +503,7 @@ func (api *ConsensusAPI) NewPayloadV2(params engine.ExecutableData) (engine.Payl
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun"))
}
if params.BlobGasUsed != nil {
- return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil params.BlobGasUsed pre-cancun"))
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun"))
}
return api.newPayload(params, nil, nil)
}
@@ -506,14 +517,14 @@ func (api *ConsensusAPI) NewPayloadV3(params engine.ExecutableData, versionedHas
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun"))
}
if params.BlobGasUsed == nil {
- return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil params.BlobGasUsed post-cancun"))
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
if versionedHashes == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
}
if beaconRoot == nil {
- return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil parentBeaconBlockRoot post-cancun"))
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun {
@@ -796,6 +807,23 @@ func (api *ConsensusAPI) ExchangeCapabilities([]string) []string {
return caps
}
+// GetClientVersionV1 exchanges client version data of this node.
+func (api *ConsensusAPI) GetClientVersionV1(info engine.ClientVersionV1) []engine.ClientVersionV1 {
+ log.Trace("Engine API request received", "method", "GetClientVersionV1", "info", info.String())
+ commit := make([]byte, 4)
+ if vcs, ok := version.VCS(); ok {
+ commit = common.FromHex(vcs.Commit)[0:4]
+ }
+ return []engine.ClientVersionV1{
+ {
+ Code: engine.ClientCode,
+ Name: engine.ClientName,
+ Version: params.VersionWithMeta,
+ Commit: hexutil.Encode(commit),
+ },
+ }
+}
+
// GetPayloadBodiesByHashV1 implements engine_getPayloadBodiesByHashV1 which allows for retrieval of a list
// of block bodies by the engine api.
func (api *ConsensusAPI) GetPayloadBodiesByHashV1(hashes []common.Hash) []*engine.ExecutionPayloadBodyV1 {
@@ -842,8 +870,7 @@ func getBody(block *types.Block) *engine.ExecutionPayloadBodyV1 {
)
for j, tx := range body.Transactions {
- data, _ := tx.MarshalBinary()
- txs[j] = hexutil.Bytes(data)
+ txs[j], _ = tx.MarshalBinary()
}
// Post-shanghai withdrawals MUST be set to empty slice instead of nil
diff --git a/eth/catalyst/simulated_beacon.go b/eth/catalyst/simulated_beacon.go
index 5ad50f14c1..f1c5689e1d 100644
--- a/eth/catalyst/simulated_beacon.go
+++ b/eth/catalyst/simulated_beacon.go
@@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/log"
@@ -263,7 +264,7 @@ func (c *SimulatedBeacon) Rollback() {
// Fork sets the head to the provided hash.
func (c *SimulatedBeacon) Fork(parentHash common.Hash) error {
- if len(c.eth.TxPool().Pending(false)) != 0 {
+ if len(c.eth.TxPool().Pending(txpool.PendingFilter{})) != 0 {
return errors.New("pending block dirty")
}
parent := c.eth.BlockChain().GetBlockByHash(parentHash)
@@ -275,7 +276,7 @@ func (c *SimulatedBeacon) Fork(parentHash common.Hash) error {
// AdjustTime creates a new block with an adjusted timestamp.
func (c *SimulatedBeacon) AdjustTime(adjustment time.Duration) error {
- if len(c.eth.TxPool().Pending(false)) != 0 {
+ if len(c.eth.TxPool().Pending(txpool.PendingFilter{})) != 0 {
return errors.New("could not adjust time on non-empty block")
}
parent := c.eth.BlockChain().CurrentBlock()
diff --git a/eth/downloader/api.go b/eth/downloader/api.go
index c1352b317b..46a4f4d6b8 100644
--- a/eth/downloader/api.go
+++ b/eth/downloader/api.go
@@ -39,7 +39,7 @@ type DownloaderAPI struct {
uninstallSyncSubscription chan *uninstallSyncSubscriptionRequest
}
-// NewDownloaderAPI create a new DownloaderAPI. The API has an internal event loop that
+// NewDownloaderAPI creates a new DownloaderAPI. The API has an internal event loop that
// listens for events from the downloader through the global event mux. In case it receives one of
// these events it broadcasts it to all syncing subscriptions that are installed through the
// installSyncSubscription channel.
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index e524a39d78..7e7e63bef9 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -36,7 +36,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
var (
@@ -205,7 +205,7 @@ type BlockChain interface {
// TrieDB retrieves the low level trie database used for interacting
// with trie nodes.
- TrieDB() *trie.Database
+ TrieDB() *triedb.Database
}
type DownloadOption func(downloader *Downloader) *Downloader
@@ -567,6 +567,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *
if err := d.lightchain.SetHead(origin); err != nil {
return err
}
+ log.Info("Truncated excess ancient chain segment", "oldhead", frozen-1, "newhead", origin)
}
}
// Initiate the sync using a concurrent header and content retrieval algorithm
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 4bdffd33a7..27fb95e74d 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -69,7 +69,7 @@ func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
})
gspec := &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
+ Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
@@ -443,9 +443,6 @@ func assertOwnChain(t *testing.T, tester *downloadTester, length int) {
func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) }
func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) }
func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) }
-func TestCanonicalSynchronisation67Full(t *testing.T) { testCanonSync(t, eth.ETH67, FullSync) }
-func TestCanonicalSynchronisation67Snap(t *testing.T) { testCanonSync(t, eth.ETH67, SnapSync) }
-func TestCanonicalSynchronisation67Light(t *testing.T) { testCanonSync(t, eth.ETH67, LightSync) }
func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -466,8 +463,6 @@ func testCanonSync(t *testing.T, protocol uint, mode SyncMode) {
// until the cached blocks are retrieved.
func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) }
func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) }
-func TestThrottling67Full(t *testing.T) { testThrottling(t, eth.ETH67, FullSync) }
-func TestThrottling67Snap(t *testing.T) { testThrottling(t, eth.ETH67, SnapSync) }
func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -549,9 +544,6 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) {
func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) }
func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) }
func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) }
-func TestForkedSync67Full(t *testing.T) { testForkedSync(t, eth.ETH67, FullSync) }
-func TestForkedSync67Snap(t *testing.T) { testForkedSync(t, eth.ETH67, SnapSync) }
-func TestForkedSync67Light(t *testing.T) { testForkedSync(t, eth.ETH67, LightSync) }
func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -579,9 +571,6 @@ func testForkedSync(t *testing.T, protocol uint, mode SyncMode) {
func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) }
func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) }
func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) }
-func TestHeavyForkedSync67Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, FullSync) }
-func TestHeavyForkedSync67Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, SnapSync) }
-func TestHeavyForkedSync67Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH67, LightSync) }
func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -611,9 +600,6 @@ func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) }
func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) }
func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) }
-func TestBoundedForkedSync67Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, FullSync) }
-func TestBoundedForkedSync67Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, SnapSync) }
-func TestBoundedForkedSync67Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH67, LightSync) }
func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -648,15 +634,6 @@ func TestBoundedHeavyForkedSync68Snap(t *testing.T) {
func TestBoundedHeavyForkedSync68Light(t *testing.T) {
testBoundedHeavyForkedSync(t, eth.ETH68, LightSync)
}
-func TestBoundedHeavyForkedSync67Full(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH67, FullSync)
-}
-func TestBoundedHeavyForkedSync67Snap(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH67, SnapSync)
-}
-func TestBoundedHeavyForkedSync67Light(t *testing.T) {
- testBoundedHeavyForkedSync(t, eth.ETH67, LightSync)
-}
func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -684,9 +661,6 @@ func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {
func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) }
func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) }
func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) }
-func TestCancel67Full(t *testing.T) { testCancel(t, eth.ETH67, FullSync) }
-func TestCancel67Snap(t *testing.T) { testCancel(t, eth.ETH67, SnapSync) }
-func TestCancel67Light(t *testing.T) { testCancel(t, eth.ETH67, LightSync) }
func testCancel(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -714,9 +688,6 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) {
func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) }
func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) }
func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) }
-func TestMultiSynchronisation67Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, FullSync) }
-func TestMultiSynchronisation67Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, SnapSync) }
-func TestMultiSynchronisation67Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH67, LightSync) }
func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -741,9 +712,6 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {
func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) }
func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) }
func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) }
-func TestMultiProtoSynchronisation67Full(t *testing.T) { testMultiProtoSync(t, eth.ETH67, FullSync) }
-func TestMultiProtoSynchronisation67Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH67, SnapSync) }
-func TestMultiProtoSynchronisation67Light(t *testing.T) { testMultiProtoSync(t, eth.ETH67, LightSync) }
func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -754,7 +722,6 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
// Create peers of every type
tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
- tester.newPeer("peer 67", eth.ETH67, chain.blocks[1:])
// Synchronise with the requested peer and make sure all blocks were retrieved
if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil {
@@ -763,7 +730,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
assertOwnChain(t, tester, len(chain.blocks))
// Check that no peers have been dropped off
- for _, version := range []int{68, 67} {
+ for _, version := range []int{68} {
peer := fmt.Sprintf("peer %d", version)
if _, ok := tester.peers[peer]; !ok {
t.Errorf("%s dropped", peer)
@@ -776,9 +743,6 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) }
func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) }
func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) }
-func TestEmptyShortCircuit67Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, FullSync) }
-func TestEmptyShortCircuit67Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, SnapSync) }
-func TestEmptyShortCircuit67Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH67, LightSync) }
func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -827,9 +791,6 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {
func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) }
func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) }
func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) }
-func TestMissingHeaderAttack67Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, FullSync) }
-func TestMissingHeaderAttack67Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, SnapSync) }
-func TestMissingHeaderAttack67Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH67, LightSync) }
func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -856,9 +817,6 @@ func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) }
func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) }
func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) }
-func TestShiftedHeaderAttack67Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, FullSync) }
-func TestShiftedHeaderAttack67Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, SnapSync) }
-func TestShiftedHeaderAttack67Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH67, LightSync) }
func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -892,15 +850,6 @@ func TestHighTDStarvationAttack68Snap(t *testing.T) {
func TestHighTDStarvationAttack68Light(t *testing.T) {
testHighTDStarvationAttack(t, eth.ETH68, LightSync)
}
-func TestHighTDStarvationAttack67Full(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH67, FullSync)
-}
-func TestHighTDStarvationAttack67Snap(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH67, SnapSync)
-}
-func TestHighTDStarvationAttack67Light(t *testing.T) {
- testHighTDStarvationAttack(t, eth.ETH67, LightSync)
-}
func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -915,7 +864,6 @@ func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {
// Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) }
-func TestBlockHeaderAttackerDropping67(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH67) }
func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
// Define the disconnection requirement for individual hash fetch errors
@@ -966,9 +914,6 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {
func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) }
func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) }
func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) }
-func TestSyncProgress67Full(t *testing.T) { testSyncProgress(t, eth.ETH67, FullSync) }
-func TestSyncProgress67Snap(t *testing.T) { testSyncProgress(t, eth.ETH67, SnapSync) }
-func TestSyncProgress67Light(t *testing.T) { testSyncProgress(t, eth.ETH67, LightSync) }
func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -1046,9 +991,6 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync
func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) }
func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) }
func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) }
-func TestForkedSyncProgress67Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, FullSync) }
-func TestForkedSyncProgress67Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, SnapSync) }
-func TestForkedSyncProgress67Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH67, LightSync) }
func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -1120,9 +1062,6 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) }
func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) }
func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) }
-func TestFailedSyncProgress67Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, FullSync) }
-func TestFailedSyncProgress67Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, SnapSync) }
-func TestFailedSyncProgress67Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH67, LightSync) }
func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -1189,9 +1128,6 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) }
func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) }
func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) }
-func TestFakedSyncProgress67Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, FullSync) }
-func TestFakedSyncProgress67Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, SnapSync) }
-func TestFakedSyncProgress67Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH67, LightSync) }
func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {
tester := newTester(t)
@@ -1353,8 +1289,6 @@ func TestRemoteHeaderRequestSpan(t *testing.T) {
// being fast-synced from, avoiding potential cheap eclipse attacks.
func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) }
func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) }
-func TestBeaconSync67Full(t *testing.T) { testBeaconSync(t, eth.ETH67, FullSync) }
-func TestBeaconSync67Snap(t *testing.T) { testBeaconSync(t, eth.ETH67, SnapSync) }
func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) {
//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go
index 1bf03411d1..46f3febd8b 100644
--- a/eth/downloader/testchain_test.go
+++ b/eth/downloader/testchain_test.go
@@ -30,7 +30,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
// Test chain parameters.
@@ -41,10 +41,10 @@ var (
testGspec = &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
+ Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- testGenesis = testGspec.MustCommit(testDB, trie.NewDatabase(testDB, trie.HashDefaults))
+ testGenesis = testGspec.MustCommit(testDB, triedb.NewDatabase(testDB, triedb.HashDefaults))
)
// The common prefix of all test chains:
diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go
index fc0b2db8d7..21a8839eb8 100644
--- a/eth/fetcher/block_fetcher_test.go
+++ b/eth/fetcher/block_fetcher_test.go
@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
var (
@@ -41,10 +42,10 @@ var (
testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
gspec = &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
+ Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
}
- genesis = gspec.MustCommit(testdb, trie.NewDatabase(testdb, trie.HashDefaults))
+ genesis = gspec.MustCommit(testdb, triedb.NewDatabase(testdb, triedb.HashDefaults))
unknownBlock = types.NewBlock(&types.Header{Root: types.EmptyRootHash, GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil))
)
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index cfb588c858..59b6165863 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -34,7 +34,7 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
func makeReceipt(addr common.Address) *types.Receipt {
@@ -57,7 +57,7 @@ func BenchmarkFilters(b *testing.B) {
addr4 = common.BytesToAddress([]byte("random addresses please"))
gspec = &core.Genesis{
- Alloc: core.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
+ Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}},
BaseFee: big.NewInt(params.InitialBaseFee),
Config: params.TestChainConfig,
}
@@ -86,7 +86,7 @@ func BenchmarkFilters(b *testing.B) {
// The test txs are not properly signed, can't simply create a chain
// and then import blocks. TODO(rjl493456442) try to get rid of the
// manual database writes.
- gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
+ gspec.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults))
for i, block := range chain {
rawdb.WriteBlock(db, block)
@@ -165,7 +165,7 @@ func TestFilters(t *testing.T) {
gspec = &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
addr: {Balance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(params.Ether))},
contract: {Balance: big.NewInt(0), Code: bytecode},
contract2: {Balance: big.NewInt(0), Code: bytecode},
@@ -181,7 +181,7 @@ func TestFilters(t *testing.T) {
// Hack: GenerateChainWithGenesis creates a new db.
// Commit the genesis manually and use GenerateChain.
- _, err = gspec.Commit(db, trie.NewDatabase(db, nil))
+ _, err = gspec.Commit(db, triedb.NewDatabase(db, nil))
if err != nil {
t.Fatal(err)
}
diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go
index fa41594739..c6ce443a06 100644
--- a/eth/gasprice/gasprice_test.go
+++ b/eth/gasprice/gasprice_test.go
@@ -135,7 +135,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
config = *params.TestChainConfig // needs copy because it is modified below
gspec = &core.Genesis{
Config: &config,
- Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
+ Alloc: types.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
}
signer = types.LatestSigner(gspec.Config)
)
diff --git a/eth/handler.go b/eth/handler.go
index ffb80685f8..15e51af5c6 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -86,7 +86,7 @@ type txPool interface {
// Pending should return pending transactions.
// The slice should be modifiable by the caller.
- Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction
+ Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction
// SubscribeTransactions subscribes to new transaction events. The subscriber
// can decide whether to receive notifications only for newly seen transactions
diff --git a/eth/handler_bsc_test.go b/eth/handler_bsc_test.go
index 842db2726f..076b08c213 100644
--- a/eth/handler_bsc_test.go
+++ b/eth/handler_bsc_test.go
@@ -36,7 +36,7 @@ func (h *testBscHandler) Handle(peer *bsc.Peer, packet bsc.Packet) error {
}
}
-func TestSendVotes67(t *testing.T) { testSendVotes(t, eth.ETH67) }
+func TestSendVotes68(t *testing.T) { testSendVotes(t, eth.ETH68) }
func testSendVotes(t *testing.T, protocol uint) {
t.Parallel()
@@ -63,10 +63,6 @@ func testSendVotes(t *testing.T, protocol uint) {
time.Sleep(250 * time.Millisecond) // Wait until vote events get out of the system (can't use events, vote broadcaster races with peer join)
protos := []p2p.Protocol{
- {
- Name: "eth",
- Version: eth.ETH67,
- },
{
Name: "eth",
Version: eth.ETH68,
@@ -77,10 +73,6 @@ func testSendVotes(t *testing.T, protocol uint) {
},
}
caps := []p2p.Cap{
- {
- Name: "eth",
- Version: eth.ETH67,
- },
{
Name: "eth",
Version: eth.ETH68,
@@ -163,7 +155,7 @@ func testSendVotes(t *testing.T, protocol uint) {
}
}
-func TestRecvVotes67(t *testing.T) { testRecvVotes(t, eth.ETH67) }
+func TestRecvVotes68(t *testing.T) { testRecvVotes(t, eth.ETH68) }
func testRecvVotes(t *testing.T, protocol uint) {
t.Parallel()
@@ -173,10 +165,6 @@ func testRecvVotes(t *testing.T, protocol uint) {
defer handler.close()
protos := []p2p.Protocol{
- {
- Name: "eth",
- Version: eth.ETH67,
- },
{
Name: "eth",
Version: eth.ETH68,
@@ -187,10 +175,6 @@ func testRecvVotes(t *testing.T, protocol uint) {
},
}
caps := []p2p.Cap{
- {
- Name: "eth",
- Version: eth.ETH67,
- },
{
Name: "eth",
Version: eth.ETH68,
diff --git a/eth/handler_eth.go b/eth/handler_eth.go
index 0b3da37928..2b778acd5f 100644
--- a/eth/handler_eth.go
+++ b/eth/handler_eth.go
@@ -67,10 +67,7 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
case *eth.NewBlockPacket:
return h.handleBlockBroadcast(peer, packet.Block, packet.TD)
- case *eth.NewPooledTransactionHashesPacket67:
- return h.txFetcher.Notify(peer.ID(), nil, nil, *packet)
-
- case *eth.NewPooledTransactionHashesPacket68:
+ case *eth.NewPooledTransactionHashesPacket:
return h.txFetcher.Notify(peer.ID(), packet.Types, packet.Sizes, packet.Hashes)
case *eth.TransactionsPacket:
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index d7470f5a0f..534b72b865 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -61,11 +61,7 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
h.blockBroadcasts.Send(packet.Block)
return nil
- case *eth.NewPooledTransactionHashesPacket67:
- h.txAnnounces.Send(([]common.Hash)(*packet))
- return nil
-
- case *eth.NewPooledTransactionHashesPacket68:
+ case *eth.NewPooledTransactionHashesPacket:
h.txAnnounces.Send(packet.Hashes)
return nil
@@ -84,7 +80,6 @@ func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {
// Tests that peers are correctly accepted (or rejected) based on the advertised
// fork IDs in the protocol handshake.
-func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) }
func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) }
func testForkIDSplit(t *testing.T, protocol uint) {
@@ -256,7 +251,6 @@ func testForkIDSplit(t *testing.T, protocol uint) {
}
// Tests that received transactions are added to the local pool.
-func TestRecvTransactions67(t *testing.T) { testRecvTransactions(t, eth.ETH67) }
func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) }
func testRecvTransactions(t *testing.T, protocol uint) {
@@ -313,7 +307,7 @@ func testRecvTransactions(t *testing.T, protocol uint) {
}
}
-func TestWaitSnapExtensionTimout67(t *testing.T) { testWaitSnapExtensionTimout(t, eth.ETH67) }
+func TestWaitSnapExtensionTimout68(t *testing.T) { testWaitSnapExtensionTimout(t, eth.ETH68) }
func testWaitSnapExtensionTimout(t *testing.T, protocol uint) {
t.Parallel()
@@ -350,7 +344,7 @@ func testWaitSnapExtensionTimout(t *testing.T, protocol uint) {
}
}
-func TestWaitBscExtensionTimout67(t *testing.T) { testWaitBscExtensionTimout(t, eth.ETH67) }
+func TestWaitBscExtensionTimout68(t *testing.T) { testWaitBscExtensionTimout(t, eth.ETH68) }
func testWaitBscExtensionTimout(t *testing.T, protocol uint) {
t.Parallel()
@@ -388,7 +382,6 @@ func testWaitBscExtensionTimout(t *testing.T, protocol uint) {
}
// This test checks that pending transactions are sent.
-func TestSendTransactions67(t *testing.T) { testSendTransactions(t, eth.ETH67) }
func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) }
func testSendTransactions(t *testing.T, protocol uint) {
@@ -447,7 +440,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
seen := make(map[common.Hash]struct{})
for len(seen) < len(insert) {
switch protocol {
- case 67, 68:
+ case 68:
select {
case hashes := <-anns:
for _, hash := range hashes {
@@ -473,7 +466,6 @@ func testSendTransactions(t *testing.T, protocol uint) {
// Tests that transactions get propagated to all attached peers, either via direct
// broadcasts or via announcements/retrievals.
-func TestTransactionPropagation67(t *testing.T) { testTransactionPropagation(t, eth.ETH67) }
func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) }
func testTransactionPropagation(t *testing.T, protocol uint) {
@@ -561,8 +553,8 @@ func TestTransactionPendingReannounce(t *testing.T) {
defer sourcePipe.Close()
defer sinkPipe.Close()
- sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeer(enode.ID{0}, "", nil), sourcePipe, source.txpool)
- sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeer(enode.ID{0}, "", nil), sinkPipe, sink.txpool)
+ sourcePeer := eth.NewPeer(eth.ETH68, p2p.NewPeer(enode.ID{0}, "", nil), sourcePipe, source.txpool)
+ sinkPeer := eth.NewPeer(eth.ETH68, p2p.NewPeer(enode.ID{0}, "", nil), sinkPipe, sink.txpool)
defer sourcePeer.Close()
defer sinkPeer.Close()
@@ -633,8 +625,8 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
defer sourcePipe.Close()
defer sinkPipe.Close()
- sourcePeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
- sinkPeer := eth.NewPeer(eth.ETH67, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
+ sourcePeer := eth.NewPeer(eth.ETH68, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil)
+ sinkPeer := eth.NewPeer(eth.ETH68, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil)
defer sourcePeer.Close()
defer sinkPeer.Close()
@@ -686,7 +678,6 @@ func testBroadcastBlock(t *testing.T, peers, bcasts int) {
// Tests that a propagated malformed block (uncles or transactions don't match
// with the hashes in the header) gets discarded and not broadcast forward.
-func TestBroadcastMalformedBlock67(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH67) }
func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) }
func testBroadcastMalformedBlock(t *testing.T, protocol uint) {
@@ -783,8 +774,8 @@ func TestOptionMaxPeersPerIP(t *testing.T) {
}
uniPort++
- src := eth.NewPeer(eth.ETH67, peer1, p2pSrc, handler.txpool)
- sink := eth.NewPeer(eth.ETH67, peer2, p2pSink, handler.txpool)
+ src := eth.NewPeer(eth.ETH68, peer1, p2pSrc, handler.txpool)
+ sink := eth.NewPeer(eth.ETH68, peer2, p2pSink, handler.txpool)
defer src.Close()
defer sink.Close()
diff --git a/eth/handler_test.go b/eth/handler_test.go
index ed9bd0ef85..45da3f726d 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
+ "github.com/holiman/uint256"
)
var (
@@ -105,7 +106,7 @@ func (p *testTxPool) ReannouceTransactions(txs []*types.Transaction) []error {
}
// Pending returns all the transactions known to the pool
-func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
+func (p *testTxPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction {
p.lock.RLock()
defer p.lock.RUnlock()
@@ -124,8 +125,8 @@ func (p *testTxPool) Pending(enforceTips bool) map[common.Address][]*txpool.Lazy
Hash: tx.Hash(),
Tx: tx,
Time: tx.Time(),
- GasFeeCap: tx.GasFeeCap(),
- GasTipCap: tx.GasTipCap(),
+ GasFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
+ GasTipCap: uint256.MustFromBig(tx.GasTipCap()),
Gas: tx.Gas(),
BlobGas: tx.BlobGas(),
})
@@ -169,7 +170,7 @@ func newTestHandlerWithBlocks(blocks int) *testHandler {
db := rawdb.NewMemoryDatabase()
gspec := &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
+ Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},
}
chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
diff --git a/eth/handler_trust.go b/eth/handler_trust.go
index 0b116b9255..e470c5316b 100644
--- a/eth/handler_trust.go
+++ b/eth/handler_trust.go
@@ -1,6 +1,7 @@
package eth
import (
+ "errors"
"fmt"
"github.com/ethereum/go-ethereum/core"
@@ -44,7 +45,7 @@ func (h *trustHandler) Handle(peer *trust.Peer, packet trust.Packet) error {
vm.HandleRootResponse(verifyResult, peer.ID())
return nil
}
- return fmt.Errorf("verify manager is nil which is unexpected")
+ return errors.New("verify manager is nil which is unexpected")
default:
return fmt.Errorf("unexpected trust packet type: %T", packet)
diff --git a/eth/protocols/eth/broadcast.go b/eth/protocols/eth/broadcast.go
index c02e2fa60e..57c98215b4 100644
--- a/eth/protocols/eth/broadcast.go
+++ b/eth/protocols/eth/broadcast.go
@@ -167,16 +167,9 @@ func (p *Peer) announceTransactions() {
if len(pending) > 0 {
done = make(chan struct{})
gopool.Submit(func() {
- if p.version >= ETH68 {
- if err := p.sendPooledTransactionHashes68(pending, pendingTypes, pendingSizes); err != nil {
- fail <- err
- return
- }
- } else {
- if err := p.sendPooledTransactionHashes66(pending); err != nil {
- fail <- err
- return
- }
+ if err := p.sendPooledTransactionHashes(pending, pendingTypes, pendingSizes); err != nil {
+ fail <- err
+ return
}
close(done)
//p.Log().Trace("Sent transaction announcements", "count", len(pending))
diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go
index 42d0412a12..2d69ecdc83 100644
--- a/eth/protocols/eth/handler.go
+++ b/eth/protocols/eth/handler.go
@@ -93,10 +93,6 @@ type TxPool interface {
func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {
protocols := make([]p2p.Protocol, 0, len(ProtocolVersions))
for _, version := range ProtocolVersions {
- // Blob transactions require eth/68 announcements, disable everything else
- if version <= ETH67 && backend.Chain().Config().CancunTime != nil {
- continue
- }
version := version // Closure
protocols = append(protocols, p2p.Protocol{
@@ -166,26 +162,11 @@ type Decoder interface {
Time() time.Time
}
-var eth67 = map[uint64]msgHandler{
- NewBlockHashesMsg: handleNewBlockhashes,
- NewBlockMsg: handleNewBlock,
- TransactionsMsg: handleTransactions,
- NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes67,
- GetBlockHeadersMsg: handleGetBlockHeaders,
- BlockHeadersMsg: handleBlockHeaders,
- GetBlockBodiesMsg: handleGetBlockBodies,
- BlockBodiesMsg: handleBlockBodies,
- GetReceiptsMsg: handleGetReceipts,
- ReceiptsMsg: handleReceipts,
- GetPooledTransactionsMsg: handleGetPooledTransactions,
- PooledTransactionsMsg: handlePooledTransactions,
-}
-
var eth68 = map[uint64]msgHandler{
NewBlockHashesMsg: handleNewBlockhashes,
NewBlockMsg: handleNewBlock,
TransactionsMsg: handleTransactions,
- NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes68,
+ NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes,
GetBlockHeadersMsg: handleGetBlockHeaders,
BlockHeadersMsg: handleBlockHeaders,
GetBlockBodiesMsg: handleGetBlockBodies,
@@ -209,10 +190,8 @@ func handleMessage(backend Backend, peer *Peer) error {
}
defer msg.Discard()
- var handlers = eth67
- if peer.Version() >= ETH68 {
- handlers = eth68
- }
+ var handlers = eth68
+
// Track the amount of time it takes to serve the request and run the handler
if metrics.Enabled {
h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index 310e75400b..f553e69d69 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -102,7 +102,7 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int,
gspec := &core.Genesis{
Config: config,
- Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}},
+ Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}},
}
chain, _ := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil)
@@ -117,7 +117,7 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int,
txconfig.Journal = "" // Don't litter the disk with test journals
pool := legacypool.New(txconfig, chain)
- txpool, _ := txpool.New(new(big.Int).SetUint64(txconfig.PriceLimit), chain, []txpool.SubPool{pool})
+ txpool, _ := txpool.New(txconfig.PriceLimit, chain, []txpool.SubPool{pool})
return &testBackend{
db: db,
@@ -150,7 +150,6 @@ func (b *testBackend) Handle(*Peer, Packet) error {
}
// Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeaders67(t *testing.T) { testGetBlockHeaders(t, ETH67) }
func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) }
func testGetBlockHeaders(t *testing.T, protocol uint) {
@@ -336,7 +335,6 @@ func testGetBlockHeaders(t *testing.T, protocol uint) {
}
// Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodies67(t *testing.T) { testGetBlockBodies(t, ETH67) }
func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) }
func testGetBlockBodies(t *testing.T, protocol uint) {
@@ -431,7 +429,6 @@ func testGetBlockBodies(t *testing.T, protocol uint) {
}
// Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetBlockReceipts67(t *testing.T) { testGetBlockReceipts(t, ETH67) }
func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) }
func testGetBlockReceipts(t *testing.T, protocol uint) {
diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go
index 069e92dadf..0275708a6c 100644
--- a/eth/protocols/eth/handlers.go
+++ b/eth/protocols/eth/handlers.go
@@ -383,30 +383,13 @@ func handleReceipts(backend Backend, msg Decoder, peer *Peer) error {
}, metadata)
}
-func handleNewPooledTransactionHashes67(backend Backend, msg Decoder, peer *Peer) error {
+func handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer) error {
// New transaction announcement arrived, make sure we have
// a valid and fresh chain to handle them
if !backend.AcceptTxs() {
return nil
}
- ann := new(NewPooledTransactionHashesPacket67)
- if err := msg.Decode(ann); err != nil {
- return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
- }
- // Schedule all the unknown hashes for retrieval
- for _, hash := range *ann {
- peer.markTransaction(hash)
- }
- return backend.Handle(peer, ann)
-}
-
-func handleNewPooledTransactionHashes68(backend Backend, msg Decoder, peer *Peer) error {
- // New transaction announcement arrived, make sure we have
- // a valid and fresh chain to handle them
- if !backend.AcceptTxs() {
- return nil
- }
- ann := new(NewPooledTransactionHashesPacket68)
+ ann := new(NewPooledTransactionHashesPacket)
if err := msg.Decode(ann); err != nil {
return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
}
diff --git a/eth/protocols/eth/handshake.go b/eth/protocols/eth/handshake.go
index e962f1b27d..e7d42aa0da 100644
--- a/eth/protocols/eth/handshake.go
+++ b/eth/protocols/eth/handshake.go
@@ -72,7 +72,7 @@ func (p *Peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis
}
p.td, p.head = status.TD, status.Head
- if p.version >= ETH67 {
+ if p.version >= ETH68 {
var upgradeStatus UpgradeStatusPacket // safe to read after two values have been received from errc
if extension == nil {
extension = &UpgradeStatusExtension{}
diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go
index bdabfb165f..3ad73b58ea 100644
--- a/eth/protocols/eth/handshake_test.go
+++ b/eth/protocols/eth/handshake_test.go
@@ -27,7 +27,6 @@ import (
)
// Tests that handshake failures are detected and reported correctly.
-func TestHandshake67(t *testing.T) { testHandshake(t, ETH67) }
func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) }
func testHandshake(t *testing.T, protocol uint) {
diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go
index c58d133d1a..0826698030 100644
--- a/eth/protocols/eth/peer.go
+++ b/eth/protocols/eth/peer.go
@@ -95,7 +95,7 @@ type Peer struct {
lock sync.RWMutex // Mutex protecting the internal fields
}
-// NewPeer create a wrapper for a network connection and negotiated protocol
+// NewPeer creates a wrapper for a network connection and negotiated protocol
// version.
func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Peer {
peer := &Peer{
@@ -234,29 +234,17 @@ func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
}
}
-// sendPooledTransactionHashes66 sends transaction hashes to the peer and includes
-// them in its transaction hash set for future reference.
-//
-// This method is a helper used by the async transaction announcer. Don't call it
-// directly as the queueing (memory) and transmission (bandwidth) costs should
-// not be managed directly.
-func (p *Peer) sendPooledTransactionHashes66(hashes []common.Hash) error {
- // Mark all the transactions as known, but ensure we don't overflow our limits
- p.knownTxs.Add(hashes...)
- return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket67(hashes))
-}
-
-// sendPooledTransactionHashes68 sends transaction hashes (tagged with their type
+// sendPooledTransactionHashes sends transaction hashes (tagged with their type
// and size) to the peer and includes them in its transaction hash set for future
// reference.
//
// This method is a helper used by the async transaction announcer. Don't call it
// directly as the queueing (memory) and transmission (bandwidth) costs should
// not be managed directly.
-func (p *Peer) sendPooledTransactionHashes68(hashes []common.Hash, types []byte, sizes []uint32) error {
+func (p *Peer) sendPooledTransactionHashes(hashes []common.Hash, types []byte, sizes []uint32) error {
// Mark all the transactions as known, but ensure we don't overflow our limits
p.knownTxs.Add(hashes...)
- return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket68{Types: types, Sizes: sizes, Hashes: hashes})
+ return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket{Types: types, Sizes: sizes, Hashes: hashes})
}
// AsyncSendPooledTransactionHashes queues a list of transactions hashes to eventually
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go
index 18d33cdb72..3e592a7d3c 100644
--- a/eth/protocols/eth/protocol.go
+++ b/eth/protocols/eth/protocol.go
@@ -30,7 +30,6 @@ import (
// Constants to match up protocol versions and messages
const (
- ETH67 = 67
ETH68 = 68
)
@@ -40,11 +39,11 @@ const ProtocolName = "eth"
// ProtocolVersions are the supported versions of the `eth` protocol (first
// is primary).
-var ProtocolVersions = []uint{ETH68, ETH67}
+var ProtocolVersions = []uint{ETH68}
// protocolLengths are the number of implemented message corresponding to
// different protocol versions.
-var protocolLengths = map[uint]uint64{ETH68: 17, ETH67: 18}
+var protocolLengths = map[uint]uint64{ETH68: 17}
// maxMessageSize is the maximum cap on the size of a protocol message.
const maxMessageSize = 10 * 1024 * 1024
@@ -313,11 +312,8 @@ type ReceiptsRLPPacket struct {
ReceiptsRLPResponse
}
-// NewPooledTransactionHashesPacket67 represents a transaction announcement packet on eth/67.
-type NewPooledTransactionHashesPacket67 []common.Hash
-
-// NewPooledTransactionHashesPacket68 represents a transaction announcement packet on eth/68 and newer.
-type NewPooledTransactionHashesPacket68 struct {
+// NewPooledTransactionHashesPacket represents a transaction announcement packet on eth/68 and newer.
+type NewPooledTransactionHashesPacket struct {
Types []byte
Sizes []uint32
Hashes []common.Hash
@@ -379,10 +375,8 @@ func (*BlockBodiesResponse) Kind() byte { return BlockBodiesMsg }
func (*NewBlockPacket) Name() string { return "NewBlock" }
func (*NewBlockPacket) Kind() byte { return NewBlockMsg }
-func (*NewPooledTransactionHashesPacket67) Name() string { return "NewPooledTransactionHashes" }
-func (*NewPooledTransactionHashesPacket67) Kind() byte { return NewPooledTransactionHashesMsg }
-func (*NewPooledTransactionHashesPacket68) Name() string { return "NewPooledTransactionHashes" }
-func (*NewPooledTransactionHashesPacket68) Kind() byte { return NewPooledTransactionHashesMsg }
+func (*NewPooledTransactionHashesPacket) Name() string { return "NewPooledTransactionHashes" }
+func (*NewPooledTransactionHashesPacket) Kind() byte { return NewPooledTransactionHashesMsg }
func (*GetPooledTransactionsRequest) Name() string { return "GetPooledTransactions" }
func (*GetPooledTransactionsRequest) Kind() byte { return GetPooledTransactionsMsg }
diff --git a/eth/protocols/snap/handler_fuzzing_test.go b/eth/protocols/snap/handler_fuzzing_test.go
index daed7ed44a..4e234ad21b 100644
--- a/eth/protocols/snap/handler_fuzzing_test.go
+++ b/eth/protocols/snap/handler_fuzzing_test.go
@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -89,7 +90,7 @@ func doFuzz(input []byte, obj interface{}, code int) {
var trieRoot common.Hash
func getChain() *core.BlockChain {
- ga := make(core.GenesisAlloc, 1000)
+ ga := make(types.GenesisAlloc, 1000)
var a = make([]byte, 20)
var mkStorage = func(k, v int) (common.Hash, common.Hash) {
var kB = make([]byte, 32)
@@ -105,7 +106,7 @@ func getChain() *core.BlockChain {
}
for i := 0; i < 1000; i++ {
binary.LittleEndian.PutUint64(a, uint64(i+0xff))
- acc := core.GenesisAccount{Balance: big.NewInt(int64(i))}
+ acc := types.Account{Balance: big.NewInt(int64(i))}
if i%2 == 1 {
acc.Storage = storage
}
diff --git a/eth/protocols/snap/peer.go b/eth/protocols/snap/peer.go
index 3db6e22cbd..c57931678c 100644
--- a/eth/protocols/snap/peer.go
+++ b/eth/protocols/snap/peer.go
@@ -33,7 +33,7 @@ type Peer struct {
logger log.Logger // Contextual logger with the peer id injected
}
-// NewPeer create a wrapper for a network connection and negotiated protocol
+// NewPeer creates a wrapper for a network connection and negotiated protocol
// version.
func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
id := p.ID().String()
@@ -46,7 +46,7 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
}
}
-// NewFakePeer create a fake snap peer without a backing p2p peer, for testing purposes.
+// NewFakePeer creates a fake snap peer without a backing p2p peer, for testing purposes.
func NewFakePeer(version uint, id string, rw p2p.MsgReadWriter) *Peer {
return &Peer{
id: id,
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 73d61c2ffd..b780868b4e 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -36,8 +36,9 @@ import (
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/testutil"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
"golang.org/x/exp/slices"
@@ -1504,7 +1505,7 @@ func getCodeByHash(hash common.Hash) []byte {
// makeAccountTrieNoStorage spits out a trie, along with the leafs
func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) {
var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
+ db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
entries []*kv
)
@@ -1539,7 +1540,7 @@ func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
entries []*kv
boundaries []common.Hash
- db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
+ db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
)
// Initialize boundaries
@@ -1597,7 +1598,7 @@ func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) {
// has a unique storage set.
func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
+ db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
entries []*kv
storageRoots = make(map[common.Hash]common.Hash)
@@ -1652,7 +1653,7 @@ func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots
// makeAccountTrieWithStorage spits out a trie, along with the leafs
func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool, uneven bool) (*trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) {
var (
- db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
+ db = triedb.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
accTrie = trie.NewEmpty(db)
entries []*kv
storageRoots = make(map[common.Hash]common.Hash)
@@ -1725,7 +1726,7 @@ func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, bounda
// makeStorageTrieWithSeed fills a storage trie with n items, returning the
// not-yet-committed trie and the sorted entries. The seeds can be used to ensure
// that tries are unique.
-func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
+func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
trie, _ := trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
var entries []*kv
for i := uint64(1); i <= n; i++ {
@@ -1748,7 +1749,7 @@ func makeStorageTrieWithSeed(owner common.Hash, n, seed uint64, db *trie.Databas
// makeBoundaryStorageTrie constructs a storage trie. Instead of filling
// storage slots normally, this function will fill a few slots which have
// boundary hash.
-func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
+func makeBoundaryStorageTrie(owner common.Hash, n int, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
var (
entries []*kv
boundaries []common.Hash
@@ -1798,7 +1799,7 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo
// makeUnevenStorageTrie constructs a storage tries will states distributed in
// different range unevenly.
-func makeUnevenStorageTrie(owner common.Hash, slots int, db *trie.Database) (common.Hash, *trienode.NodeSet, []*kv) {
+func makeUnevenStorageTrie(owner common.Hash, slots int, db *triedb.Database) (common.Hash, *trienode.NodeSet, []*kv) {
var (
entries []*kv
tr, _ = trie.New(trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash), db)
@@ -1830,7 +1831,7 @@ func makeUnevenStorageTrie(owner common.Hash, slots int, db *trie.Database) (com
func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) {
t.Helper()
- triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
+ triedb := triedb.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme))
accTrie, err := trie.New(trie.StateTrieID(root), triedb)
if err != nil {
t.Fatal(err)
@@ -1967,9 +1968,9 @@ func TestSlotEstimation(t *testing.T) {
}
}
-func newDbConfig(scheme string) *trie.Config {
+func newDbConfig(scheme string) *triedb.Config {
if scheme == rawdb.HashScheme {
- return &trie.Config{}
+ return &triedb.Config{}
}
- return &trie.Config{PathDB: pathdb.Defaults}
+ return &triedb.Config{PathDB: pathdb.Defaults}
}
diff --git a/eth/protocols/trust/handler_test.go b/eth/protocols/trust/handler_test.go
index 144f4e602a..187b29c932 100644
--- a/eth/protocols/trust/handler_test.go
+++ b/eth/protocols/trust/handler_test.go
@@ -16,7 +16,7 @@ import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
var (
@@ -50,11 +50,11 @@ func newTestBackendWithGenerator(blocks int) *testBackend {
genspec := &core.Genesis{
Config: params.AllCliqueProtocolChanges,
ExtraData: make([]byte, 32+common.AddressLength+65),
- Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}},
+ Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}},
BaseFee: big.NewInt(0),
}
copy(genspec.ExtraData[32:], testAddr[:])
- genesis := genspec.MustCommit(db, trie.NewDatabase(db, nil))
+ genesis := genspec.MustCommit(db, triedb.NewDatabase(db, nil))
chain, _ := core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil)
generator := func(i int, block *core.BlockGen) {
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index 26acdcd1d5..946b292926 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -33,6 +33,7 @@ import (
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
"github.com/holiman/uint256"
)
@@ -44,7 +45,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
var (
current *types.Block
database state.Database
- triedb *trie.Database
+ tdb *triedb.Database
report = true
origin = block.NumberU64()
)
@@ -70,14 +71,14 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
// the internal junks created by tracing will be persisted into the disk.
// TODO(rjl493456442), clean cache is disabled to prevent memory leak,
// please re-enable it for better performance.
- database = state.NewDatabaseWithConfig(eth.chainDb, trie.HashDefaults)
+ database = state.NewDatabaseWithConfig(eth.chainDb, triedb.HashDefaults)
if statedb, err = state.New(block.Root(), database, nil); err == nil {
log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number())
return statedb, noopReleaser, nil
}
}
// The optional base statedb is given, mark the start point as parent block
- statedb, database, triedb, report = base, base.Database(), base.Database().TrieDB(), false
+ statedb, database, tdb, report = base, base.Database(), base.Database().TrieDB(), false
current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
if current == nil {
return nil, nil, fmt.Errorf("missing parent block %v %d", block.ParentHash(), block.NumberU64()-1)
@@ -90,8 +91,8 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
// the internal junks created by tracing will be persisted into the disk.
// TODO(rjl493456442), clean cache is disabled to prevent memory leak,
// please re-enable it for better performance.
- triedb = trie.NewDatabase(eth.chainDb, trie.HashDefaults)
- database = state.NewDatabaseWithNodeDB(eth.chainDb, triedb)
+ tdb = triedb.NewDatabase(eth.chainDb, triedb.HashDefaults)
+ database = state.NewDatabaseWithNodeDB(eth.chainDb, tdb)
// If we didn't check the live database, do check state over ephemeral database,
// otherwise we would rewind past a persisted block (specific corner case is
@@ -169,17 +170,17 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
}
// Hold the state reference and also drop the parent state
// to prevent accumulating too many nodes in memory.
- triedb.Reference(root, common.Hash{})
+ tdb.Reference(root, common.Hash{})
if parent != (common.Hash{}) {
- triedb.Dereference(parent)
+ tdb.Dereference(parent)
}
parent = root
}
if report {
- diff, nodes, immutablenodes, imgs := triedb.Size()
+ diff, nodes, immutablenodes, imgs := tdb.Size() // all memory is contained within the nodes return in hashdb
log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "layer", diff, "nodes", nodes, "immutablenodes", immutablenodes, "preimages", imgs)
}
- return statedb, func() { triedb.Dereference(block.Root()) }, nil
+ return statedb, func() { tdb.Dereference(block.Root()) }, nil
}
func (eth *Ethereum) pathState(block *types.Block) (*state.StateDB, func(), error) {
diff --git a/eth/sync.go b/eth/sync.go
index d13e9c28b9..4fe0a90f41 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -22,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/log"
@@ -35,7 +36,7 @@ const (
// syncTransactions starts sending all currently pending transactions to the given peer.
func (h *handler) syncTransactions(p *eth.Peer) {
var hashes []common.Hash
- for _, batch := range h.txpool.Pending(false) {
+ for _, batch := range h.txpool.Pending(txpool.PendingFilter{OnlyPlainTxs: true}) {
for _, tx := range batch {
hashes = append(hashes, tx.Hash)
}
diff --git a/eth/sync_test.go b/eth/sync_test.go
index d26cbb66ea..a31986730f 100644
--- a/eth/sync_test.go
+++ b/eth/sync_test.go
@@ -28,7 +28,6 @@ import (
)
// Tests that snap sync is disabled after a successful sync cycle.
-func TestSnapSyncDisabling67(t *testing.T) { testSnapSyncDisabling(t, eth.ETH67, snap.SNAP1) }
func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) }
// Tests that snap sync gets disabled as soon as a real block is successfully
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 7308116b68..f18a8536c2 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -1068,7 +1068,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc
config.BlockOverrides.Apply(&vmctx)
}
// Execute the trace
- msg, err := args.ToMessage(api.backend.RPCGasCap(), block.BaseFee())
+ msg, err := args.ToMessage(api.backend.RPCGasCap(), vmctx.BaseFee)
if err != nil {
return nil, err
}
diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go
index 8799639366..cd07b0638b 100644
--- a/eth/tracers/api_test.go
+++ b/eth/tracers/api_test.go
@@ -193,7 +193,7 @@ func TestTraceCall(t *testing.T) {
accounts := newAccounts(3)
genesis := &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
accounts[0].addr: {Balance: big.NewInt(params.Ether)},
accounts[1].addr: {Balance: big.NewInt(params.Ether)},
accounts[2].addr: {Balance: big.NewInt(params.Ether)},
@@ -411,7 +411,7 @@ func TestTraceTransaction(t *testing.T) {
accounts := newAccounts(2)
genesis := &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
accounts[0].addr: {Balance: big.NewInt(params.Ether)},
accounts[1].addr: {Balance: big.NewInt(params.Ether)},
},
@@ -466,7 +466,7 @@ func TestTraceBlock(t *testing.T) {
accounts := newAccounts(3)
genesis := &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
accounts[0].addr: {Balance: big.NewInt(params.Ether)},
accounts[1].addr: {Balance: big.NewInt(params.Ether)},
accounts[2].addr: {Balance: big.NewInt(params.Ether)},
@@ -556,7 +556,7 @@ func TestTracingWithOverrides(t *testing.T) {
storageAccount := common.Address{0x13, 37}
genesis := &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
accounts[0].addr: {Balance: big.NewInt(params.Ether)},
accounts[1].addr: {Balance: big.NewInt(params.Ether)},
accounts[2].addr: {Balance: big.NewInt(params.Ether)},
@@ -925,7 +925,7 @@ func TestTraceChain(t *testing.T) {
accounts := newAccounts(3)
genesis := &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
accounts[0].addr: {Balance: big.NewInt(params.Ether)},
accounts[1].addr: {Balance: big.NewInt(params.Ether)},
accounts[2].addr: {Balance: big.NewInt(params.Ether)},
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index 0b43a021ea..6216a16ced 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -133,9 +133,9 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
GasLimit: uint64(test.Context.GasLimit),
BaseFee: test.Genesis.BaseFee,
}
- triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
+ state = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
)
- triedb.Close()
+ state.Close()
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
if err != nil {
@@ -145,7 +145,7 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
- evm := vm.NewEVM(context, core.NewEVMTxContext(msg), statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
+ evm := vm.NewEVM(context, core.NewEVMTxContext(msg), state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer})
vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if err != nil {
t.Fatalf("failed to execute transaction: %v", err)
@@ -235,8 +235,8 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
if err != nil {
b.Fatalf("failed to prepare transaction for tracing: %v", err)
}
- triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
- defer triedb.Close()
+ state := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
+ defer state.Close()
b.ReportAllocs()
b.ResetTimer()
@@ -245,8 +245,8 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
if err != nil {
b.Fatalf("failed to create call tracer: %v", err)
}
- evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
- snap := statedb.Snapshot()
+ evm := vm.NewEVM(context, txContext, state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer})
+ snap := state.StateDB.Snapshot()
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil {
b.Fatalf("failed to execute transaction: %v", err)
@@ -254,7 +254,7 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
if _, err = tracer.GetResult(); err != nil {
b.Fatal(err)
}
- statedb.RevertToSnapshot(snap)
+ state.StateDB.RevertToSnapshot(snap)
}
}
@@ -362,18 +362,18 @@ func TestInternals(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
- triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(),
- core.GenesisAlloc{
- to: core.GenesisAccount{
+ state := tests.MakePreState(rawdb.NewMemoryDatabase(),
+ types.GenesisAlloc{
+ to: types.Account{
Code: tc.code,
},
- origin: core.GenesisAccount{
+ origin: types.Account{
Balance: big.NewInt(500000000000000),
},
}, false, rawdb.HashScheme)
- defer triedb.Close()
+ defer state.Close()
- evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Tracer: tc.tracer})
+ evm := vm.NewEVM(context, txContext, state.StateDB, params.MainnetChainConfig, vm.Config{Tracer: tc.tracer})
msg := &core.Message{
To: &to,
From: origin,
diff --git a/eth/tracers/internal/tracetest/flat_calltrace_test.go b/eth/tracers/internal/tracetest/flat_calltrace_test.go
index b318548bc1..abee488917 100644
--- a/eth/tracers/internal/tracetest/flat_calltrace_test.go
+++ b/eth/tracers/internal/tracetest/flat_calltrace_test.go
@@ -95,8 +95,8 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
- triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
- defer triedb.Close()
+ state := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
+ defer state.Close()
// Create the tracer, the EVM environment and run it
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
@@ -107,7 +107,7 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string
if err != nil {
return fmt.Errorf("failed to prepare transaction for tracing: %v", err)
}
- evm := vm.NewEVM(context, core.NewEVMTxContext(msg), statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
+ evm := vm.NewEVM(context, core.NewEVMTxContext(msg), state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer})
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil {
diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go
index 666a5fda78..8a60123dc2 100644
--- a/eth/tracers/internal/tracetest/prestate_test.go
+++ b/eth/tracers/internal/tracetest/prestate_test.go
@@ -103,9 +103,9 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
GasLimit: uint64(test.Context.GasLimit),
BaseFee: test.Genesis.BaseFee,
}
- triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
+ state = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme)
)
- defer triedb.Close()
+ defer state.Close()
tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig)
if err != nil {
@@ -115,7 +115,7 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
- evm := vm.NewEVM(context, core.NewEVMTxContext(msg), statedb, test.Genesis.Config, vm.Config{Tracer: tracer})
+ evm := vm.NewEVM(context, core.NewEVMTxContext(msg), state.StateDB, test.Genesis.Config, vm.Config{Tracer: tracer})
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
diff --git a/eth/tracers/js/internal/tracers/call_tracer_legacy.js b/eth/tracers/js/internal/tracers/call_tracer_legacy.js
index 451a644b91..0760bb1e3f 100644
--- a/eth/tracers/js/internal/tracers/call_tracer_legacy.js
+++ b/eth/tracers/js/internal/tracers/call_tracer_legacy.js
@@ -219,7 +219,7 @@
return this.finalize(result);
},
- // finalize recreates a call object using the final desired field oder for json
+ // finalize recreates a call object using the final desired field order for json
// serialization. This is a nicety feature to pass meaningfully ordered results
// to users who don't interpret it, just display it.
finalize: function(call) {
diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go
index 555c41a451..0a22936784 100644
--- a/eth/tracers/native/call.go
+++ b/eth/tracers/native/call.go
@@ -161,7 +161,7 @@ func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco
return
}
// Avoid processing nested calls when only caring about top call
- if t.config.OnlyTopCall && depth > 0 {
+ if t.config.OnlyTopCall && depth > 1 {
return
}
// Skip if tracing was interrupted
diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go
index 54d34ec5d1..6ac266e06d 100644
--- a/eth/tracers/tracers_test.go
+++ b/eth/tracers/tracers_test.go
@@ -61,7 +61,7 @@ func BenchmarkTransactionTrace(b *testing.B) {
GasLimit: gas,
BaseFee: big.NewInt(8),
}
- alloc := core.GenesisAlloc{}
+ alloc := types.GenesisAlloc{}
// The code pushes 'deadbeef' into memory, then the other params, and calls CREATE2, then returns
// the address
loop := []byte{
@@ -69,18 +69,18 @@ func BenchmarkTransactionTrace(b *testing.B) {
byte(vm.PUSH1), 0, // jumpdestination
byte(vm.JUMP),
}
- alloc[common.HexToAddress("0x00000000000000000000000000000000deadbeef")] = core.GenesisAccount{
+ alloc[common.HexToAddress("0x00000000000000000000000000000000deadbeef")] = types.Account{
Nonce: 1,
Code: loop,
Balance: big.NewInt(1),
}
- alloc[from] = core.GenesisAccount{
+ alloc[from] = types.Account{
Nonce: 1,
Code: []byte{},
Balance: big.NewInt(500000000000000),
}
- triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false, rawdb.HashScheme)
- defer triedb.Close()
+ state := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false, rawdb.HashScheme)
+ defer state.Close()
// Create the tracer, the EVM environment and run it
tracer := logger.NewStructLogger(&logger.Config{
@@ -89,7 +89,7 @@ func BenchmarkTransactionTrace(b *testing.B) {
//EnableMemory: false,
//EnableReturnData: false,
})
- evm := vm.NewEVM(context, txContext, statedb, params.AllEthashProtocolChanges, vm.Config{Tracer: tracer})
+ evm := vm.NewEVM(context, txContext, state.StateDB, params.AllEthashProtocolChanges, vm.Config{Tracer: tracer})
msg, err := core.TransactionToMessage(tx, signer, context.BaseFee)
if err != nil {
b.Fatalf("failed to prepare transaction for tracing: %v", err)
@@ -98,13 +98,13 @@ func BenchmarkTransactionTrace(b *testing.B) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
- snap := statedb.Snapshot()
+ snap := state.StateDB.Snapshot()
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
_, err = st.TransitionDb()
if err != nil {
b.Fatal(err)
}
- statedb.RevertToSnapshot(snap)
+ state.StateDB.RevertToSnapshot(snap)
if have, want := len(tracer.StructLogs()), 244752; have != want {
b.Fatalf("trace wrong, want %d steps, have %d", want, have)
}
@@ -124,9 +124,9 @@ func TestMemCopying(t *testing.T) {
{0, 100, 0, "", 0}, // No need to pad (0 size)
{100, 50, 100, "", 100}, // Should pad 100-150
{100, 50, 5, "", 5}, // Wanted range fully within memory
- {100, -50, 0, "offset or size must not be negative", 0}, // Errror
- {0, 1, 1024*1024 + 1, "reached limit for padding memory slice: 1048578", 0}, // Errror
- {10, 0, 1024*1024 + 100, "reached limit for padding memory slice: 1048666", 0}, // Errror
+ {100, -50, 0, "offset or size must not be negative", 0}, // Error
+ {0, 1, 1024*1024 + 1, "reached limit for padding memory slice: 1048578", 0}, // Error
+ {10, 0, 1024*1024 + 100, "reached limit for padding memory slice: 1048666", 0}, // Error
} {
mem := vm.NewMemory()
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index 2de3694319..13ce66c7ae 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -52,6 +52,16 @@ func DialContext(ctx context.Context, rawurl string) (*Client, error) {
return NewClient(c), nil
}
+// DialOptions creates a new RPC client for the given URL. You can supply any of the
+// pre-defined client options to configure the underlying transport.
+func DialOptions(ctx context.Context, rawurl string, opts ...rpc.ClientOption) (*Client, error) {
+ c, err := rpc.DialOptions(ctx, rawurl, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return NewClient(c), nil
+}
+
// NewClient creates a client that uses the given RPC client.
func NewClient(c *rpc.Client) *Client {
return &Client{c}
@@ -715,6 +725,43 @@ func (ec *Client) SendTransactionConditional(ctx context.Context, tx *types.Tran
return ec.c.CallContext(ctx, nil, "eth_sendRawTransactionConditional", hexutil.Encode(data), opts)
}
+// MevRunning returns whether MEV is running
+func (ec *Client) MevRunning(ctx context.Context) (bool, error) {
+ var result bool
+ err := ec.c.CallContext(ctx, &result, "mev_running")
+ return result, err
+}
+
+// SendBid sends a bid
+func (ec *Client) SendBid(ctx context.Context, args types.BidArgs) (common.Hash, error) {
+ var hash common.Hash
+ err := ec.c.CallContext(ctx, &hash, "mev_sendBid", args)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ return hash, nil
+}
+
+// BestBidGasFee returns the gas fee of the best bid for the given parent hash.
+func (ec *Client) BestBidGasFee(ctx context.Context, parentHash common.Hash) (*big.Int, error) {
+ var fee *big.Int
+ err := ec.c.CallContext(ctx, &fee, "mev_bestBidGasFee", parentHash)
+ if err != nil {
+ return nil, err
+ }
+ return fee, nil
+}
+
+// MevParams returns the static params of mev
+func (ec *Client) MevParams(ctx context.Context) (*types.MevParams, error) {
+ var params types.MevParams
+ err := ec.c.CallContext(ctx, &params, "mev_params")
+ if err != nil {
+ return nil, err
+ }
+ return &params, err
+}
+
func toBlockNumArg(number *big.Int) string {
if number == nil {
return "latest"
@@ -756,6 +803,12 @@ func toCallArg(msg ethereum.CallMsg) interface{} {
if msg.AccessList != nil {
arg["accessList"] = msg.AccessList
}
+ if msg.BlobGasFeeCap != nil {
+ arg["maxFeePerBlobGas"] = (*hexutil.Big)(msg.BlobGasFeeCap)
+ }
+ if msg.BlobHashes != nil {
+ arg["blobVersionedHashes"] = msg.BlobHashes
+ }
return arg
}
diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go
index 5f2a437185..54ce597b09 100644
--- a/ethclient/ethclient_test.go
+++ b/ethclient/ethclient_test.go
@@ -39,7 +39,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
// Verify that Client implements the ethereum interfaces.
@@ -268,7 +268,7 @@ var (
var genesis = &core.Genesis{
Config: params.AllEthashProtocolChanges,
- Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance}},
+ Alloc: types.GenesisAlloc{testAddr: {Balance: testBalance}},
ExtraData: []byte("test genesis"),
Timestamp: 9000,
BaseFee: big.NewInt(params.InitialBaseFeeForBSC),
@@ -340,7 +340,7 @@ func generateTestChain() []*types.Block {
signer := types.HomesteadSigner{}
// Create a database pre-initialize with a genesis block
db := rawdb.NewMemoryDatabase()
- genesis.MustCommit(db, trie.NewDatabase(db, nil))
+ genesis.MustCommit(db, triedb.NewDatabase(db, nil))
chain, _ := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil, core.EnablePersistDiff(860000))
generate := func(i int, block *core.BlockGen) {
block.OffsetTime(5)
@@ -381,7 +381,7 @@ func generateTestChain() []*types.Block {
block.AddTxWithChain(chain, testTx2)
}
}
- gblock := genesis.MustCommit(db, trie.NewDatabase(db, nil))
+ gblock := genesis.MustCommit(db, triedb.NewDatabase(db, nil))
engine := ethash.NewFaker()
blocks, _ := core.GenerateChain(genesis.Config, gblock, engine, db, testBlockNum, generate)
blocks = append([]*types.Block{gblock}, blocks...)
diff --git a/ethclient/gethclient/gethclient_test.go b/ethclient/gethclient/gethclient_test.go
index dbe2310a62..158886475e 100644
--- a/ethclient/gethclient/gethclient_test.go
+++ b/ethclient/gethclient/gethclient_test.go
@@ -81,7 +81,7 @@ func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
func generateTestChain() (*core.Genesis, []*types.Block) {
genesis := &core.Genesis{
Config: params.AllEthashProtocolChanges,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
testAddr: {Balance: testBalance, Storage: map[common.Hash]common.Hash{testSlot: testValue}},
testContract: {Nonce: 1, Code: []byte{0x13, 0x37}},
testEmpty: {Balance: big.NewInt(1)},
diff --git a/ethclient/simulated/backend.go b/ethclient/simulated/backend.go
index 3327297c32..13e7cad586 100644
--- a/ethclient/simulated/backend.go
+++ b/ethclient/simulated/backend.go
@@ -79,7 +79,7 @@ type Backend struct {
// contract bindings in unit tests.
//
// A simulated backend always uses chainID 1337.
-func NewBackend(alloc core.GenesisAlloc, options ...func(nodeConf *node.Config, ethConf *ethconfig.Config)) *Backend {
+func NewBackend(alloc types.GenesisAlloc, options ...func(nodeConf *node.Config, ethConf *ethconfig.Config)) *Backend {
// Create the default configurations for the outer node shell and the Ethereum
// service to mutate with the options afterwards
nodeConf := node.DefaultConfig
diff --git a/ethclient/simulated/backend_test.go b/ethclient/simulated/backend_test.go
index a9a8accfea..a8fd7913c3 100644
--- a/ethclient/simulated/backend_test.go
+++ b/ethclient/simulated/backend_test.go
@@ -26,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
@@ -41,7 +40,7 @@ var (
func simTestBackend(testAddr common.Address) *Backend {
return NewBackend(
- core.GenesisAlloc{
+ types.GenesisAlloc{
testAddr: {Balance: big.NewInt(10000000000000000)},
},
)
@@ -52,7 +51,7 @@ func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
// create a signed transaction to send
head, _ := client.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
- gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
+ gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))
addr := crypto.PubkeyToAddress(key.PublicKey)
chainid, _ := client.ChainID(context.Background())
nonce, err := client.PendingNonceAt(context.Background(), addr)
@@ -62,7 +61,7 @@ func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
tx := types.NewTx(&types.DynamicFeeTx{
ChainID: chainid,
Nonce: nonce,
- GasTipCap: big.NewInt(1),
+ GasTipCap: big.NewInt(params.GWei),
GasFeeCap: gasPrice,
Gas: 21000,
To: &addr,
@@ -71,7 +70,7 @@ func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
}
func TestNewBackend(t *testing.T) {
- sim := NewBackend(core.GenesisAlloc{})
+ sim := NewBackend(types.GenesisAlloc{})
defer sim.Close()
client := sim.Client()
@@ -94,7 +93,7 @@ func TestNewBackend(t *testing.T) {
}
func TestAdjustTime(t *testing.T) {
- sim := NewBackend(core.GenesisAlloc{})
+ sim := NewBackend(types.GenesisAlloc{})
defer sim.Close()
client := sim.Client()
diff --git a/ethclient/simulated/options.go b/ethclient/simulated/options.go
index 1b2f4c090d..6db995c917 100644
--- a/ethclient/simulated/options.go
+++ b/ethclient/simulated/options.go
@@ -17,6 +17,8 @@
package simulated
import (
+ "math/big"
+
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/node"
)
@@ -37,3 +39,17 @@ func WithCallGasLimit(gaslimit uint64) func(nodeConf *node.Config, ethConf *ethc
ethConf.RPCGasCap = gaslimit
}
}
+
+// WithMinerMinTip configures the simulated backend to require a specific minimum
+// gas tip for a transaction to be included.
+//
+// 0 is not possible as a live Geth node would reject that due to DoS protection,
+// so the simulated backend will replicate that behavior for consistency.
+func WithMinerMinTip(tip *big.Int) func(nodeConf *node.Config, ethConf *ethconfig.Config) {
+ if tip == nil || tip.Cmp(new(big.Int)) <= 0 {
+ panic("invalid miner minimum tip")
+ }
+ return func(nodeConf *node.Config, ethConf *ethconfig.Config) {
+ ethConf.Miner.GasPrice = tip
+ }
+}
diff --git a/ethclient/simulated/options_test.go b/ethclient/simulated/options_test.go
index d9ff3b428a..9ff2be5ff9 100644
--- a/ethclient/simulated/options_test.go
+++ b/ethclient/simulated/options_test.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
@@ -31,7 +32,7 @@ import (
// and that it keeps the same target value.
func TestWithBlockGasLimitOption(t *testing.T) {
// Construct a simulator, targeting a different gas limit
- sim := NewBackend(core.GenesisAlloc{}, WithBlockGasLimit(12_345_678))
+ sim := NewBackend(types.GenesisAlloc{}, WithBlockGasLimit(12_345_678))
defer sim.Close()
client := sim.Client()
@@ -56,7 +57,7 @@ func TestWithBlockGasLimitOption(t *testing.T) {
// Tests that the simulator honors the RPC call caps set by the options.
func TestWithCallGasLimitOption(t *testing.T) {
// Construct a simulator, targeting a different gas limit
- sim := NewBackend(core.GenesisAlloc{
+ sim := NewBackend(types.GenesisAlloc{
testAddr: {Balance: big.NewInt(10000000000000000)},
}, WithCallGasLimit(params.TxGas-1))
defer sim.Close()
diff --git a/ethdb/database.go b/ethdb/database.go
index 5af19e3478..cb694fe420 100644
--- a/ethdb/database.go
+++ b/ethdb/database.go
@@ -155,11 +155,16 @@ type AncientStater interface {
AncientDatadir() (string, error)
}
+type StateStoreReader interface {
+ StateStoreReader() Reader
+}
+
// Reader contains the methods required to read data from both key-value as well as
// immutable ancient data.
type Reader interface {
KeyValueReader
AncientReader
+ StateStoreReader
}
// Writer contains the methods required to write data to both key-value as well as
@@ -189,12 +194,18 @@ type DiffStore interface {
SetDiffStore(diff KeyValueStore)
}
+type StateStore interface {
+ StateStore() Database
+ SetStateStore(state Database)
+}
+
// Database contains all the methods required by the high level database to not
// only access the key-value data store but also the chain freezer.
type Database interface {
Reader
Writer
DiffStore
+ StateStore
Batcher
Iteratee
Stater
diff --git a/ethdb/remotedb/remotedb.go b/ethdb/remotedb/remotedb.go
index babb625d88..0214126153 100644
--- a/ethdb/remotedb/remotedb.go
+++ b/ethdb/remotedb/remotedb.go
@@ -94,6 +94,18 @@ func (db *Database) SetDiffStore(diff ethdb.KeyValueStore) {
panic("not supported")
}
+func (db *Database) StateStore() ethdb.Database {
+ panic("not supported")
+}
+
+func (db *Database) SetStateStore(state ethdb.Database) {
+ panic("not supported")
+}
+
+func (db *Database) StateStoreReader() ethdb.Reader {
+ return db
+}
+
func (db *Database) ReadAncients(fn func(op ethdb.AncientReaderOp) error) (err error) {
return fn(db)
}
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index 29559991be..61ceec443e 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -611,6 +611,10 @@ func (s *Service) reportBlock(conn *connWrapper, block *types.Block) error {
// Gather the block details from the header or block chain
details := s.assembleBlockStats(block)
+ // Short circuit if the block detail is not available.
+ if details == nil {
+ return nil
+ }
// Assemble the block report and send it to the server
log.Trace("Sending new block to ethstats", "number", details.Number, "hash", details.Hash)
@@ -638,10 +642,16 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
// check if backend is a full node
fullBackend, ok := s.backend.(fullNodeBackend)
if ok {
+ // Retrieve current chain head if no block is given.
if block == nil {
head := fullBackend.CurrentBlock()
block, _ = fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(head.Number.Uint64()))
}
+ // Short circuit if no block is available. It might happen when
+ // the blockchain is reorging.
+ if block == nil {
+ return nil
+ }
header = block.Header()
td = fullBackend.GetTd(context.Background(), header.Hash())
diff --git a/go.mod b/go.mod
index 5fac2011ed..c660fbb6d8 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ require (
github.com/aws/aws-sdk-go-v2/config v1.18.45
github.com/aws/aws-sdk-go-v2/credentials v1.13.43
github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2
+ github.com/bnb-chain/fastssz v0.1.2
github.com/bnb-chain/ics23 v0.1.0
github.com/btcsuite/btcd/btcec/v2 v2.3.2
github.com/cespare/cp v1.1.1
@@ -28,13 +29,13 @@ require (
github.com/fatih/color v1.13.0
github.com/fatih/structs v1.1.0
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e
- github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
+ github.com/fjl/memsize v0.0.2
github.com/fsnotify/fsnotify v1.6.0
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08
github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46
github.com/gofrs/flock v0.8.1
github.com/golang-jwt/jwt/v4 v4.5.0
- github.com/golang/protobuf v1.5.3
+ github.com/golang/protobuf v1.5.4
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
github.com/google/gofuzz v1.2.0
github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b
@@ -43,7 +44,7 @@ require (
github.com/graph-gophers/graphql-go v1.3.0
github.com/hashicorp/go-bexpr v0.1.10
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
- github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7
+ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/holiman/uint256 v1.2.4
github.com/huin/goupnp v1.3.0
@@ -281,7 +282,7 @@ require (
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.56.3 // indirect
- google.golang.org/protobuf v1.30.0 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apimachinery v0.20.0 // indirect
diff --git a/go.sum b/go.sum
index 53a2bb50a3..db1a5cc097 100644
--- a/go.sum
+++ b/go.sum
@@ -210,6 +210,8 @@ github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsy
github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
+github.com/bnb-chain/fastssz v0.1.2 h1:vTcXw5SwCtRYnl/BEclujiml7GXiVOZ74tub4GHpvlM=
+github.com/bnb-chain/fastssz v0.1.2/go.mod h1:KcabV+OEw2QwgyY8Fc88ZG79CKYkFdu0kKWyfA3dI6o=
github.com/bnb-chain/greenfield-tendermint v0.0.0-20230417032003-4cda1f296fb2 h1:jubavYCs/mCFj/g6Utl+l4SfpykdBdWJFPsvb9FcEXU=
github.com/bnb-chain/greenfield-tendermint v0.0.0-20230417032003-4cda1f296fb2/go.mod h1:9q11eHNRY9FDwFH+4pompzPNGv//Z3VcfvkELaHJPMs=
github.com/bnb-chain/ics23 v0.1.0 h1:DvjGOts2FBfbxB48384CYD1LbcrfjThFz8kowY/7KxU=
@@ -431,8 +433,9 @@ github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5 h1:6dVcS0LktRSyEE
github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5/go.mod h1:S8yiDeAXy8f88W4Ul+0dBMPx49S05byYbmZD6Uv94K4=
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY=
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
-github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
+github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
+github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
@@ -606,8 +609,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -763,8 +766,8 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J
github.com/herumi/bls-eth-go-binary v0.0.0-20210130185500-57372fb27371/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U=
github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e h1:wCMygKUQhmcQAjlk2Gquzq6dLmyMv2kF+llRspoRgrk=
github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U=
-github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
-github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
+github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4=
+github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
@@ -2345,8 +2348,8 @@ google.golang.org/protobuf v1.25.1-0.20201208041424-160c7477e0e8/go.mod h1:hFxJC
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
-google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610=
gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI=
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
index f91229d015..1dda102058 100644
--- a/graphql/graphql_test.go
+++ b/graphql/graphql_test.go
@@ -189,7 +189,7 @@ func TestGraphQLBlockSerializationEIP2718(t *testing.T) {
Config: params.AllEthashProtocolChanges,
GasLimit: 11500000,
Difficulty: big.NewInt(1048576),
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
address: {Balance: funds},
// The address 0xdad sloads 0x00 and 0x01
dad: {
@@ -286,7 +286,7 @@ func TestGraphQLConcurrentResolvers(t *testing.T) {
Config: params.AllEthashProtocolChanges,
GasLimit: 11500000,
Difficulty: big.NewInt(1048576),
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
addr: {Balance: big.NewInt(params.Ether)},
dad: {
// LOG0(0, 0), LOG0(0, 0), RETURN(0, 0)
@@ -379,7 +379,7 @@ func TestWithdrawals(t *testing.T) {
Config: params.AllEthashProtocolChanges,
GasLimit: 11500000,
Difficulty: common.Big1,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
addr: {Balance: big.NewInt(params.Ether)},
},
}
diff --git a/interfaces.go b/interfaces.go
index c6aee295ee..53e2e3ae16 100644
--- a/interfaces.go
+++ b/interfaces.go
@@ -152,6 +152,10 @@ type CallMsg struct {
Data []byte // input data, usually an ABI-encoded contract method invocation
AccessList types.AccessList // EIP-2930 access list.
+
+ // For BlobTxType
+ BlobGasFeeCap *big.Int
+ BlobHashes []common.Hash
}
// A ContractCaller provides contract calls, essentially transactions that are executed by
diff --git a/internal/build/download.go b/internal/build/download.go
index 903d0308df..fda573df83 100644
--- a/internal/build/download.go
+++ b/internal/build/download.go
@@ -40,7 +40,7 @@ func MustLoadChecksums(file string) *ChecksumDB {
if err != nil {
log.Fatal("can't load checksum file: " + err.Error())
}
- return &ChecksumDB{strings.Split(string(content), "\n")}
+ return &ChecksumDB{strings.Split(strings.ReplaceAll(string(content), "\r\n", "\n"), "\n")}
}
// Verify checks whether the given file is valid according to the checksum database.
diff --git a/internal/era/accumulator.go b/internal/era/accumulator.go
new file mode 100644
index 0000000000..8be05cfff4
--- /dev/null
+++ b/internal/era/accumulator.go
@@ -0,0 +1,91 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package era
+
+import (
+ "fmt"
+ "math/big"
+
+ ssz "github.com/bnb-chain/fastssz"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// ComputeAccumulator calculates the SSZ hash tree root of the Era1
+// accumulator of header records.
+func ComputeAccumulator(hashes []common.Hash, tds []*big.Int) (common.Hash, error) {
+ if len(hashes) != len(tds) {
+ return common.Hash{}, fmt.Errorf("must have equal number hashes as td values")
+ }
+ if len(hashes) > MaxEra1Size {
+ return common.Hash{}, fmt.Errorf("too many records: have %d, max %d", len(hashes), MaxEra1Size)
+ }
+ hh := ssz.NewHasher()
+ for i := range hashes {
+ rec := headerRecord{hashes[i], tds[i]}
+ root, err := rec.HashTreeRoot()
+ if err != nil {
+ return common.Hash{}, err
+ }
+ hh.Append(root[:])
+ }
+ hh.MerkleizeWithMixin(0, uint64(len(hashes)), uint64(MaxEra1Size))
+ return hh.HashRoot()
+}
+
+// headerRecord is an individual record for a historical header.
+//
+// See https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#the-header-accumulator
+// for more information.
+type headerRecord struct {
+ Hash common.Hash
+ TotalDifficulty *big.Int
+}
+
+// GetTree completes the ssz.HashRoot interface, but is unused.
+func (h *headerRecord) GetTree() (*ssz.Node, error) {
+ return nil, nil
+}
+
+// HashTreeRoot ssz hashes the headerRecord object.
+func (h *headerRecord) HashTreeRoot() ([32]byte, error) {
+ return ssz.HashWithDefaultHasher(h)
+}
+
+// HashTreeRootWith ssz hashes the headerRecord object with a hasher.
+func (h *headerRecord) HashTreeRootWith(hh ssz.HashWalker) (err error) {
+ hh.PutBytes(h.Hash[:])
+ td := bigToBytes32(h.TotalDifficulty)
+ hh.PutBytes(td[:])
+ hh.Merkleize(0)
+ return
+}
+
+// bigToBytes32 converts a big.Int into a little-endian 32-byte array.
+func bigToBytes32(n *big.Int) (b [32]byte) {
+ n.FillBytes(b[:])
+ reverseOrder(b[:])
+ return
+}
+
+// reverseOrder reverses the byte order of a slice.
+func reverseOrder(b []byte) []byte {
+ for i := 0; i < 16; i++ {
+ b[i], b[32-i-1] = b[32-i-1], b[i]
+ }
+ return b
+}
diff --git a/internal/era/builder.go b/internal/era/builder.go
new file mode 100644
index 0000000000..9217c049f3
--- /dev/null
+++ b/internal/era/builder.go
@@ -0,0 +1,224 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+package era
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/internal/era/e2store"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/golang/snappy"
+)
+
+// Builder is used to create Era1 archives of block data.
+//
+// Era1 files are themselves e2store files. For more information on this format,
+// see https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md.
+//
+// The overall structure of an Era1 file follows closely the structure of an Era file
+// which contains consensus Layer data (and as a byproduct, EL data after the merge).
+//
+// The structure can be summarized through this definition:
+//
+// era1 := Version | block-tuple* | other-entries* | Accumulator | BlockIndex
+// block-tuple := CompressedHeader | CompressedBody | CompressedReceipts | TotalDifficulty
+//
+// Each basic element is its own entry:
+//
+// Version = { type: [0x65, 0x32], data: nil }
+// CompressedHeader = { type: [0x03, 0x00], data: snappyFramed(rlp(header)) }
+// CompressedBody = { type: [0x04, 0x00], data: snappyFramed(rlp(body)) }
+// CompressedReceipts = { type: [0x05, 0x00], data: snappyFramed(rlp(receipts)) }
+// TotalDifficulty = { type: [0x06, 0x00], data: uint256(header.total_difficulty) }
+// AccumulatorRoot = { type: [0x07, 0x00], data: accumulator-root }
+// BlockIndex = { type: [0x32, 0x66], data: block-index }
+//
+// Accumulator is computed by constructing an SSZ list of header-records of length at most
+// 8192 and then calculating the hash_tree_root of that list.
+//
+// header-record := { block-hash: Bytes32, total-difficulty: Uint256 }
+// accumulator := hash_tree_root([]header-record, 8192)
+//
+// BlockIndex stores relative offsets to each compressed block entry. The
+// format is:
+//
+// block-index := starting-number | index | index | index ... | count
+//
+// starting-number is the first block number in the archive. Every index is a
+// defined relative to beginning of the record. The total number of block
+// entries in the file is recorded with count.
+//
+// Due to the accumulator size limit of 8192, the maximum number of blocks in
+// an Era1 batch is also 8192.
type Builder struct {
	w        *e2store.Writer // e2store writer wrapping the output stream
	startNum *uint64         // number of the first added block; nil until the first AddRLP
	startTd  *big.Int        // total difficulty before the first block (td - difficulty)
	indexes  []uint64        // absolute file offset of each block tuple
	hashes   []common.Hash   // hash of every added block, input to the accumulator
	tds      []*big.Int      // total difficulty of every added block
	written  int             // total bytes written to w so far

	buf    *bytes.Buffer  // scratch buffer reused for each snappy compression
	snappy *snappy.Writer // framed snappy writer, reset onto buf per entry
}
+
+// NewBuilder returns a new Builder instance.
+func NewBuilder(w io.Writer) *Builder {
+ buf := bytes.NewBuffer(nil)
+ return &Builder{
+ w: e2store.NewWriter(w),
+ buf: buf,
+ snappy: snappy.NewBufferedWriter(buf),
+ }
+}
+
+// Add writes a compressed block entry and compressed receipts entry to the
+// underlying e2store file.
+func (b *Builder) Add(block *types.Block, receipts types.Receipts, td *big.Int) error {
+ eh, err := rlp.EncodeToBytes(block.Header())
+ if err != nil {
+ return err
+ }
+ eb, err := rlp.EncodeToBytes(block.Body())
+ if err != nil {
+ return err
+ }
+ er, err := rlp.EncodeToBytes(receipts)
+ if err != nil {
+ return err
+ }
+ return b.AddRLP(eh, eb, er, block.NumberU64(), block.Hash(), td, block.Difficulty())
+}
+
// AddRLP writes a compressed block entry and compressed receipts entry to the
// underlying e2store file.
//
// header, body, and receipts are pre-computed RLP encodings. td is the total
// difficulty including this block; difficulty is the block's own difficulty,
// used only to derive the pre-batch starting total difficulty.
func (b *Builder) AddRLP(header, body, receipts []byte, number uint64, hash common.Hash, td, difficulty *big.Int) error {
	// Write Era1 version entry before first block.
	if b.startNum == nil {
		n, err := b.w.Write(TypeVersion, nil)
		if err != nil {
			return err
		}
		startNum := number
		b.startNum = &startNum
		// Starting td is the total difficulty *before* the first block.
		b.startTd = new(big.Int).Sub(td, difficulty)
		b.written += n
	}
	if len(b.indexes) >= MaxEra1Size {
		return fmt.Errorf("exceeds maximum batch size of %d", MaxEra1Size)
	}

	// Record the absolute offset of this block tuple and the inputs that
	// Finalize later feeds into the accumulator.
	b.indexes = append(b.indexes, uint64(b.written))
	b.hashes = append(b.hashes, hash)
	b.tds = append(b.tds, td)

	// Write block data.
	if err := b.snappyWrite(TypeCompressedHeader, header); err != nil {
		return err
	}
	if err := b.snappyWrite(TypeCompressedBody, body); err != nil {
		return err
	}
	if err := b.snappyWrite(TypeCompressedReceipts, receipts); err != nil {
		return err
	}

	// Also write total difficulty, but don't snappy encode.
	btd := bigToBytes32(td)
	n, err := b.w.Write(TypeTotalDifficulty, btd[:])
	b.written += n
	if err != nil {
		return err
	}

	return nil
}
+
// Finalize computes the accumulator and block index values, then writes the
// corresponding e2store entries. It returns the accumulator root, which also
// appears in the canonical Era1 file name (see Filename).
func (b *Builder) Finalize() (common.Hash, error) {
	if b.startNum == nil {
		return common.Hash{}, fmt.Errorf("finalize called on empty builder")
	}
	// Compute accumulator root and write entry.
	root, err := ComputeAccumulator(b.hashes, b.tds)
	if err != nil {
		return common.Hash{}, fmt.Errorf("error calculating accumulator root: %w", err)
	}
	n, err := b.w.Write(TypeAccumulator, root[:])
	b.written += n
	if err != nil {
		return common.Hash{}, fmt.Errorf("error writing accumulator: %w", err)
	}
	// Get beginning of index entry to calculate block relative offset.
	base := int64(b.written)

	// Construct block index. Detailed format described in Builder
	// documentation, but it is essentially encoded as:
	// "start | index | index | ... | count"
	var (
		count = len(b.indexes)
		index = make([]byte, 16+count*8) // 8 bytes start + count offsets + 8 bytes count
	)
	binary.LittleEndian.PutUint64(index, *b.startNum)
	// Each offset is relative from the position it is encoded in the
	// index. This means that even if the same block was to be included in
	// the index twice (this would be invalid anyways), the relative offset
	// would be different. The idea with this is that after reading a
	// relative offset, the corresponding block can be quickly read by
	// performing a seek relative to the current position.
	for i, offset := range b.indexes {
		relative := int64(offset) - base
		binary.LittleEndian.PutUint64(index[8+i*8:], uint64(relative))
	}
	binary.LittleEndian.PutUint64(index[8+count*8:], uint64(count))

	// Finally, write the block index entry.
	if _, err := b.w.Write(TypeBlockIndex, index); err != nil {
		return common.Hash{}, fmt.Errorf("unable to write block index: %w", err)
	}

	return root, nil
}
+
+// snappyWrite is a small helper to take care snappy encoding and writing an e2store entry.
+func (b *Builder) snappyWrite(typ uint16, in []byte) error {
+ var (
+ buf = b.buf
+ s = b.snappy
+ )
+ buf.Reset()
+ s.Reset(buf)
+ if _, err := b.snappy.Write(in); err != nil {
+ return fmt.Errorf("error snappy encoding: %w", err)
+ }
+ if err := s.Flush(); err != nil {
+ return fmt.Errorf("error flushing snappy encoding: %w", err)
+ }
+ n, err := b.w.Write(typ, b.buf.Bytes())
+ b.written += n
+ if err != nil {
+ return fmt.Errorf("error writing e2store entry: %w", err)
+ }
+ return nil
+}
diff --git a/internal/era/e2store/e2store.go b/internal/era/e2store/e2store.go
new file mode 100644
index 0000000000..d85b3e44e9
--- /dev/null
+++ b/internal/era/e2store/e2store.go
@@ -0,0 +1,220 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package e2store
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+const (
+ headerSize = 8
+ valueSizeLimit = 1024 * 1024 * 50
+)
+
+// Entry is a variable-length-data record in an e2store.
+type Entry struct {
+ Type uint16
+ Value []byte
+}
+
+// Writer writes entries using e2store encoding.
+// For more information on this format, see:
+// https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md
+type Writer struct {
+ w io.Writer
+}
+
+// NewWriter returns a new Writer that writes to w.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{w}
+}
+
+// Write writes a single e2store entry to w.
+// An entry is encoded in a type-length-value format. The first 8 bytes of the
+// record store the type (2 bytes), the length (4 bytes), and some reserved
+// data (2 bytes). The remaining bytes store b.
+func (w *Writer) Write(typ uint16, b []byte) (int, error) {
+ buf := make([]byte, headerSize)
+ binary.LittleEndian.PutUint16(buf, typ)
+ binary.LittleEndian.PutUint32(buf[2:], uint32(len(b)))
+
+ // Write header.
+ if n, err := w.w.Write(buf); err != nil {
+ return n, err
+ }
+ // Write value, return combined write size.
+ n, err := w.w.Write(b)
+ return n + headerSize, err
+}
+
// A Reader reads entries from an e2store-encoded file.
// For more information on this format, see
// https://github.com/status-im/nimbus-eth2/blob/stable/docs/e2store.md
type Reader struct {
	r      io.ReaderAt // underlying data source
	offset int64       // current position for sequential Read calls
}

// NewReader returns a new Reader that reads from r.
func NewReader(r io.ReaderAt) *Reader {
	return &Reader{r, 0}
}
+
+// Read reads one Entry from r.
+func (r *Reader) Read() (*Entry, error) {
+ var e Entry
+ n, err := r.ReadAt(&e, r.offset)
+ if err != nil {
+ return nil, err
+ }
+ r.offset += int64(n)
+ return &e, nil
+}
+
// ReadAt reads one Entry from r at the specified offset.
//
// It fills entry with the decoded type and value, and returns the number of
// bytes the entry occupies on disk (header plus value).
func (r *Reader) ReadAt(entry *Entry, off int64) (int, error) {
	typ, length, err := r.ReadMetadataAt(off)
	if err != nil {
		return 0, err
	}
	entry.Type = typ

	// Check length bounds.
	if length > valueSizeLimit {
		return headerSize, fmt.Errorf("item larger than item size limit %d: have %d", valueSizeLimit, length)
	}
	if length == 0 {
		// Zero-length entry: the header is the whole record.
		return headerSize, nil
	}

	// Read value.
	val := make([]byte, length)
	if n, err := r.r.ReadAt(val, off+headerSize); err != nil {
		n += headerSize
		// An entry with a non-zero length should not return EOF when
		// reading the value.
		if err == io.EOF {
			return n, io.ErrUnexpectedEOF
		}
		return n, err
	}
	entry.Value = val
	return int(headerSize + length), nil
}
+
// ReaderAt returns an io.Reader delivering value data for the entry at
// the specified offset. If the entry type does not match the expected type, an
// error is returned.
//
// The second return value is the total on-disk size of the entry (header
// plus value), so callers can advance past it to the next entry.
func (r *Reader) ReaderAt(expectedType uint16, off int64) (io.Reader, int, error) {
	typ, length, err := r.ReadMetadataAt(off)
	if err != nil {
		return nil, headerSize, err
	}
	if typ != expectedType {
		return nil, headerSize, fmt.Errorf("wrong type, want %d have %d", expectedType, typ)
	}
	if length > valueSizeLimit {
		return nil, headerSize, fmt.Errorf("item larger than item size limit %d: have %d", valueSizeLimit, length)
	}
	// The section reader covers only the value bytes; the returned size
	// still includes the header.
	return io.NewSectionReader(r.r, off+headerSize, int64(length)), headerSize + int(length), nil
}
+
+// LengthAt reads the header at off and returns the total length of the entry,
+// including header.
+func (r *Reader) LengthAt(off int64) (int64, error) {
+ _, length, err := r.ReadMetadataAt(off)
+ if err != nil {
+ return 0, err
+ }
+ return int64(length) + headerSize, nil
+}
+
// ReadMetadataAt reads the header metadata at the given offset.
//
// It returns the entry type and the length of the entry's value. The two
// reserved header bytes must be zero for the header to be valid.
func (r *Reader) ReadMetadataAt(off int64) (typ uint16, length uint32, err error) {
	b := make([]byte, headerSize)
	if n, err := r.r.ReadAt(b, off); err != nil {
		// A partial header read means the file was truncated mid-entry;
		// a clean EOF (n == 0) means there are simply no more entries.
		if err == io.EOF && n > 0 {
			return 0, 0, io.ErrUnexpectedEOF
		}
		return 0, 0, err
	}
	typ = binary.LittleEndian.Uint16(b)
	length = binary.LittleEndian.Uint32(b[2:])

	// Check reserved bytes of header.
	if b[6] != 0 || b[7] != 0 {
		return 0, 0, fmt.Errorf("reserved bytes are non-zero")
	}

	return typ, length, nil
}
+
+// Find returns the first entry with the matching type.
+func (r *Reader) Find(want uint16) (*Entry, error) {
+ var (
+ off int64
+ typ uint16
+ length uint32
+ err error
+ )
+ for {
+ typ, length, err = r.ReadMetadataAt(off)
+ if err == io.EOF {
+ return nil, io.EOF
+ } else if err != nil {
+ return nil, err
+ }
+ if typ == want {
+ var e Entry
+ if _, err := r.ReadAt(&e, off); err != nil {
+ return nil, err
+ }
+ return &e, nil
+ }
+ off += int64(headerSize + length)
+ }
+}
+
+// FindAll returns all entries with the matching type.
+func (r *Reader) FindAll(want uint16) ([]*Entry, error) {
+ var (
+ off int64
+ typ uint16
+ length uint32
+ entries []*Entry
+ err error
+ )
+ for {
+ typ, length, err = r.ReadMetadataAt(off)
+ if err == io.EOF {
+ return entries, nil
+ } else if err != nil {
+ return entries, err
+ }
+ if typ == want {
+ e := new(Entry)
+ if _, err := r.ReadAt(e, off); err != nil {
+ return entries, err
+ }
+ entries = append(entries, e)
+ }
+ off += int64(headerSize + length)
+ }
+}
diff --git a/internal/era/e2store/e2store_test.go b/internal/era/e2store/e2store_test.go
new file mode 100644
index 0000000000..febcffe4cf
--- /dev/null
+++ b/internal/era/e2store/e2store_test.go
@@ -0,0 +1,150 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package e2store
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+func TestEncode(t *testing.T) {
+ for _, test := range []struct {
+ entries []Entry
+ want string
+ name string
+ }{
+ {
+ name: "emptyEntry",
+ entries: []Entry{{0xffff, nil}},
+ want: "ffff000000000000",
+ },
+ {
+ name: "beef",
+ entries: []Entry{{42, common.Hex2Bytes("beef")}},
+ want: "2a00020000000000beef",
+ },
+ {
+ name: "twoEntries",
+ entries: []Entry{
+ {42, common.Hex2Bytes("beef")},
+ {9, common.Hex2Bytes("abcdabcd")},
+ },
+ want: "2a00020000000000beef0900040000000000abcdabcd",
+ },
+ } {
+ tt := test
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+ var (
+ b = bytes.NewBuffer(nil)
+ w = NewWriter(b)
+ )
+ for _, e := range tt.entries {
+ if _, err := w.Write(e.Type, e.Value); err != nil {
+ t.Fatalf("encoding error: %v", err)
+ }
+ }
+ if want, have := common.FromHex(tt.want), b.Bytes(); !bytes.Equal(want, have) {
+ t.Fatalf("encoding mismatch (want %x, have %x", want, have)
+ }
+ r := NewReader(bytes.NewReader(b.Bytes()))
+ for _, want := range tt.entries {
+ have, err := r.Read()
+ if err != nil {
+ t.Fatalf("decoding error: %v", err)
+ }
+ if have.Type != want.Type {
+ t.Fatalf("decoded entry does type mismatch (want %v, got %v)", want.Type, have.Type)
+ }
+ if !bytes.Equal(have.Value, want.Value) {
+ t.Fatalf("decoded entry does not match (want %#x, got %#x)", want.Value, have.Value)
+ }
+ }
+ })
+ }
+}
+
+func TestDecode(t *testing.T) {
+ for i, tt := range []struct {
+ have string
+ err error
+ }{
+ { // basic valid decoding
+ have: "ffff000000000000",
+ },
+ { // basic invalid decoding
+ have: "ffff000000000001",
+ err: fmt.Errorf("reserved bytes are non-zero"),
+ },
+ { // no more entries to read, returns EOF
+ have: "",
+ err: io.EOF,
+ },
+ { // malformed type
+ have: "bad",
+ err: io.ErrUnexpectedEOF,
+ },
+ { // malformed length
+ have: "badbeef",
+ err: io.ErrUnexpectedEOF,
+ },
+ { // specified length longer than actual value
+ have: "beef010000000000",
+ err: io.ErrUnexpectedEOF,
+ },
+ } {
+ r := NewReader(bytes.NewReader(common.FromHex(tt.have)))
+ if tt.err != nil {
+ _, err := r.Read()
+ if err == nil && tt.err != nil {
+ t.Fatalf("test %d, expected error, got none", i)
+ }
+ if err != nil && tt.err == nil {
+ t.Fatalf("test %d, expected no error, got %v", i, err)
+ }
+ if err != nil && tt.err != nil && err.Error() != tt.err.Error() {
+ t.Fatalf("expected error %v, got %v", tt.err, err)
+ }
+ continue
+ }
+ }
+}
+
+func FuzzCodec(f *testing.F) {
+ f.Fuzz(func(t *testing.T, input []byte) {
+ r := NewReader(bytes.NewReader(input))
+ entry, err := r.Read()
+ if err != nil {
+ return
+ }
+ var (
+ b = bytes.NewBuffer(nil)
+ w = NewWriter(b)
+ )
+ w.Write(entry.Type, entry.Value)
+ output := b.Bytes()
+ // Only care about the input that was actually consumed
+ input = input[:r.offset]
+ if !bytes.Equal(input, output) {
+ t.Fatalf("decode-encode mismatch, input %#x output %#x", input, output)
+ }
+ })
+}
diff --git a/internal/era/era.go b/internal/era/era.go
new file mode 100644
index 0000000000..a0e701b7e0
--- /dev/null
+++ b/internal/era/era.go
@@ -0,0 +1,283 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package era
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/big"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/internal/era/e2store"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/golang/snappy"
+)
+
var (
	TypeVersion            uint16 = 0x3265 // marks the start of an Era1 file
	TypeCompressedHeader   uint16 = 0x03   // snappy-framed RLP block header
	TypeCompressedBody     uint16 = 0x04   // snappy-framed RLP block body
	TypeCompressedReceipts uint16 = 0x05   // snappy-framed RLP receipts
	TypeTotalDifficulty    uint16 = 0x06   // little-endian uint256 total difficulty
	TypeAccumulator        uint16 = 0x07   // SSZ accumulator root
	TypeBlockIndex         uint16 = 0x3266 // relative offsets of each block tuple

	// MaxEra1Size is the maximum number of blocks in one Era1 batch,
	// bounded by the accumulator's SSZ list limit of 8192 records.
	MaxEra1Size = 8192
)
+
+// Filename returns a recognizable Era1-formatted file name for the specified
+// epoch and network.
+func Filename(network string, epoch int, root common.Hash) string {
+ return fmt.Sprintf("%s-%05d-%s.era1", network, epoch, root.Hex()[2:10])
+}
+
// ReadDir reads all the era1 files in a directory for a given network.
// Expected filename format: <network>-<epoch>-<root>.era1
//
// Epochs must form a contiguous sequence starting at 0; a gap or a
// malformed epoch field yields an error.
func ReadDir(dir, network string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, fmt.Errorf("error reading directory %s: %w", dir, err)
	}
	var (
		next = uint64(0) // next expected epoch number
		eras []string
	)
	for _, entry := range entries {
		if path.Ext(entry.Name()) != ".era1" {
			continue
		}
		parts := strings.Split(entry.Name(), "-")
		if len(parts) != 3 || parts[0] != network {
			// invalid era1 filename, skip
			continue
		}
		if epoch, err := strconv.ParseUint(parts[1], 10, 64); err != nil {
			return nil, fmt.Errorf("malformed era1 filename: %s", entry.Name())
		} else if epoch != next {
			return nil, fmt.Errorf("missing epoch %d", next)
		}
		next += 1
		eras = append(eras, entry.Name())
	}
	return eras, nil
}
+
// ReadAtSeekCloser is the composite interface an Era1 backing file must
// satisfy: random-access reads, seeking (used to determine file length),
// and closing.
type ReadAtSeekCloser interface {
	io.ReaderAt
	io.Seeker
	io.Closer
}
+
// Era reads an Era1 file.
type Era struct {
	f   ReadAtSeekCloser // backing era1 file
	s   *e2store.Reader  // e2store reader over f
	m   metadata         // start, count, length info
	mu  *sync.Mutex      // lock for buf
	buf [8]byte          // buffer reading entry offsets
}
+
+// From returns an Era backed by f.
+func From(f ReadAtSeekCloser) (*Era, error) {
+ m, err := readMetadata(f)
+ if err != nil {
+ return nil, err
+ }
+ return &Era{
+ f: f,
+ s: e2store.NewReader(f),
+ m: m,
+ mu: new(sync.Mutex),
+ }, nil
+}
+
// Open returns an Era backed by the given filename.
// The returned Era owns the file handle; Close releases it.
func Open(filename string) (*Era, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	return From(f)
}
+
// Close closes the backing Era1 file.
func (e *Era) Close() error {
	return e.f.Close()
}
+
// GetBlockByNumber reassembles the block with the given number from its
// compressed header and body entries. Receipts are not attached.
func (e *Era) GetBlockByNumber(num uint64) (*types.Block, error) {
	if e.m.start > num || e.m.start+e.m.count <= num {
		return nil, fmt.Errorf("out-of-bounds")
	}
	// Locate the block tuple via the block index.
	off, err := e.readOffset(num)
	if err != nil {
		return nil, err
	}
	// n is the full on-disk size of the header entry, used to advance to
	// the body entry that immediately follows it.
	r, n, err := newSnappyReader(e.s, TypeCompressedHeader, off)
	if err != nil {
		return nil, err
	}
	var header types.Header
	if err := rlp.Decode(r, &header); err != nil {
		return nil, err
	}
	off += n
	r, _, err = newSnappyReader(e.s, TypeCompressedBody, off)
	if err != nil {
		return nil, err
	}
	var body types.Body
	if err := rlp.Decode(r, &body); err != nil {
		return nil, err
	}
	return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil
}
+
// Accumulator reads the accumulator entry in the Era1 file.
// The file is scanned for the first entry of TypeAccumulator.
func (e *Era) Accumulator() (common.Hash, error) {
	entry, err := e.s.Find(TypeAccumulator)
	if err != nil {
		return common.Hash{}, err
	}
	return common.BytesToHash(entry.Value), nil
}
+
// InitialTD returns initial total difficulty before the difficulty of the
// first block of the Era1 is applied.
func (e *Era) InitialTD() (*big.Int, error) {
	var (
		r      io.Reader
		header types.Header
		rawTd  []byte
		n      int64
		off    int64
		err    error
	)

	// Read first header.
	if off, err = e.readOffset(e.m.start); err != nil {
		return nil, err
	}
	if r, n, err = newSnappyReader(e.s, TypeCompressedHeader, off); err != nil {
		return nil, err
	}
	if err := rlp.Decode(r, &header); err != nil {
		return nil, err
	}
	off += n

	// Skip over next two records (the compressed body and receipts) to
	// land on the first block's total difficulty entry.
	for i := 0; i < 2; i++ {
		length, err := e.s.LengthAt(off)
		if err != nil {
			return nil, err
		}
		off += length
	}

	// Read total difficulty after first block.
	if r, _, err = e.s.ReaderAt(TypeTotalDifficulty, off); err != nil {
		return nil, err
	}
	rawTd, err = io.ReadAll(r)
	if err != nil {
		return nil, err
	}
	// Stored td is little-endian; reverse before decoding, then subtract
	// the first block's own difficulty to get the pre-block value.
	td := new(big.Int).SetBytes(reverseOrder(rawTd))
	return td.Sub(td, header.Difficulty), nil
}
+
// Start returns the listed start block, i.e. the number of the first block
// contained in the file (read from the block index).
func (e *Era) Start() uint64 {
	return e.m.start
}

// Count returns the total number of blocks in the Era1.
func (e *Era) Count() uint64 {
	return e.m.count
}
+
// readOffset reads a specific block's offset from the block index. The value n
// is the absolute block number desired.
func (e *Era) readOffset(n uint64) (int64, error) {
	var (
		blockIndexRecordOffset = e.m.length - 24 - int64(e.m.count)*8 // skips start, count, and header
		firstIndex             = blockIndexRecordOffset + 16          // first index after header / start-num
		indexOffset            = int64(n-e.m.start) * 8               // desired index * size of indexes
		offOffset              = firstIndex + indexOffset             // offset of block offset
	)
	// The 8-byte scratch buffer is shared between calls; guard it.
	e.mu.Lock()
	defer e.mu.Unlock()
	clearBuffer(e.buf[:])
	if _, err := e.f.ReadAt(e.buf[:], offOffset); err != nil {
		return 0, err
	}
	// Since the block offset is relative from the start of the block index record
	// we need to add the record offset to its offset to get the block's absolute
	// offset.
	return blockIndexRecordOffset + int64(binary.LittleEndian.Uint64(e.buf[:])), nil
}
+
+// newReader returns a snappy.Reader for the e2store entry value at off.
+func newSnappyReader(e *e2store.Reader, expectedType uint16, off int64) (io.Reader, int64, error) {
+ r, n, err := e.ReaderAt(expectedType, off)
+ if err != nil {
+ return nil, 0, err
+ }
+ return snappy.NewReader(r), int64(n), err
+}
+
// clearBuffer zeroes out the buffer.
func clearBuffer(buf []byte) {
	for i := range buf {
		buf[i] = 0
	}
}
+
// metadata wraps the metadata in the block index.
type metadata struct {
	start  uint64 // number of the first block in the file
	count  uint64 // total number of blocks in the file
	length int64  // total length of the file in bytes
}
+
// readMetadata reads the metadata stored in an Era1 file's block index.
//
// NOTE(review): a file shorter than the index footer surfaces as a ReadAt
// error rather than a dedicated "file too short" message.
func readMetadata(f ReadAtSeekCloser) (m metadata, err error) {
	// Determine length of reader.
	if m.length, err = f.Seek(0, io.SeekEnd); err != nil {
		return
	}
	b := make([]byte, 16)
	// Read count. It's the last 8 bytes of the file.
	if _, err = f.ReadAt(b[:8], m.length-8); err != nil {
		return
	}
	m.count = binary.LittleEndian.Uint64(b)
	// Read start. It's at the offset -sizeof(m.count) -
	// count*sizeof(indexEntry) - sizeof(m.start)
	if _, err = f.ReadAt(b[8:], m.length-16-int64(m.count*8)); err != nil {
		return
	}
	m.start = binary.LittleEndian.Uint64(b[8:])
	return
}
diff --git a/internal/era/era_test.go b/internal/era/era_test.go
new file mode 100644
index 0000000000..ee5d9e82a0
--- /dev/null
+++ b/internal/era/era_test.go
@@ -0,0 +1,142 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package era
+
+import (
+ "bytes"
+ "io"
+ "math/big"
+ "os"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
// testchain holds synthetic per-block payloads used to exercise the builder:
// opaque byte blobs standing in for RLP headers, bodies, and receipts, plus
// a total difficulty for each block.
type testchain struct {
	headers  [][]byte
	bodies   [][]byte
	receipts [][]byte
	tds      []*big.Int
}
+
+func TestEra1Builder(t *testing.T) {
+ // Get temp directory.
+ f, err := os.CreateTemp("", "era1-test")
+ if err != nil {
+ t.Fatalf("error creating temp file: %v", err)
+ }
+ defer f.Close()
+
+ var (
+ builder = NewBuilder(f)
+ chain = testchain{}
+ )
+ for i := 0; i < 128; i++ {
+ chain.headers = append(chain.headers, []byte{byte('h'), byte(i)})
+ chain.bodies = append(chain.bodies, []byte{byte('b'), byte(i)})
+ chain.receipts = append(chain.receipts, []byte{byte('r'), byte(i)})
+ chain.tds = append(chain.tds, big.NewInt(int64(i)))
+ }
+
+ // Write blocks to Era1.
+ for i := 0; i < len(chain.headers); i++ {
+ var (
+ header = chain.headers[i]
+ body = chain.bodies[i]
+ receipts = chain.receipts[i]
+ hash = common.Hash{byte(i)}
+ td = chain.tds[i]
+ )
+ if err = builder.AddRLP(header, body, receipts, uint64(i), hash, td, big.NewInt(1)); err != nil {
+ t.Fatalf("error adding entry: %v", err)
+ }
+ }
+
+ // Finalize Era1.
+ if _, err := builder.Finalize(); err != nil {
+ t.Fatalf("error finalizing era1: %v", err)
+ }
+
+ // Verify Era1 contents.
+ e, err := Open(f.Name())
+ if err != nil {
+ t.Fatalf("failed to open era: %v", err)
+ }
+ it, err := NewRawIterator(e)
+ if err != nil {
+ t.Fatalf("failed to make iterator: %s", err)
+ }
+ for i := uint64(0); i < uint64(len(chain.headers)); i++ {
+ if !it.Next() {
+ t.Fatalf("expected more entries")
+ }
+ if it.Error() != nil {
+ t.Fatalf("unexpected error %v", it.Error())
+ }
+ // Check headers.
+ header, err := io.ReadAll(it.Header)
+ if err != nil {
+ t.Fatalf("error reading header: %v", err)
+ }
+ if !bytes.Equal(header, chain.headers[i]) {
+ t.Fatalf("mismatched header: want %s, got %s", chain.headers[i], header)
+ }
+ // Check bodies.
+ body, err := io.ReadAll(it.Body)
+ if err != nil {
+ t.Fatalf("error reading body: %v", err)
+ }
+ if !bytes.Equal(body, chain.bodies[i]) {
+ t.Fatalf("mismatched body: want %s, got %s", chain.bodies[i], body)
+ }
+ // Check receipts.
+ receipts, err := io.ReadAll(it.Receipts)
+ if err != nil {
+ t.Fatalf("error reading receipts: %v", err)
+ }
+ if !bytes.Equal(receipts, chain.receipts[i]) {
+ t.Fatalf("mismatched receipts: want %s, got %s", chain.receipts[i], receipts)
+ }
+
+ // Check total difficulty.
+ rawTd, err := io.ReadAll(it.TotalDifficulty)
+ if err != nil {
+ t.Fatalf("error reading td: %v", err)
+ }
+ td := new(big.Int).SetBytes(reverseOrder(rawTd))
+ if td.Cmp(chain.tds[i]) != 0 {
+ t.Fatalf("mismatched tds: want %s, got %s", chain.tds[i], td)
+ }
+ }
+}
+
+func TestEraFilename(t *testing.T) {
+ for i, tt := range []struct {
+ network string
+ epoch int
+ root common.Hash
+ expected string
+ }{
+ {"mainnet", 1, common.Hash{1}, "mainnet-00001-01000000.era1"},
+ {"goerli", 99999, common.HexToHash("0xdeadbeef00000000000000000000000000000000000000000000000000000000"), "goerli-99999-deadbeef.era1"},
+ } {
+ got := Filename(tt.network, tt.epoch, tt.root)
+ if tt.expected != got {
+ t.Errorf("test %d: invalid filename: want %s, got %s", i, tt.expected, got)
+ }
+ }
+}
diff --git a/internal/era/iterator.go b/internal/era/iterator.go
new file mode 100644
index 0000000000..e74a8154b1
--- /dev/null
+++ b/internal/era/iterator.go
@@ -0,0 +1,197 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package era
+
+import (
+ "fmt"
+ "io"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// Iterator wraps RawIterator and returns decoded Era1 entries.
+type Iterator struct {
+ inner *RawIterator
+}
+
+// NewIterator returns a new Iterator instance. Next must be immediately
+// called on new iterators to load the first item.
+func NewIterator(e *Era) (*Iterator, error) {
+ inner, err := NewRawIterator(e)
+ if err != nil {
+ return nil, err
+ }
+ return &Iterator{inner}, nil
+}
+
+// Next moves the iterator to the next block entry. It returns false when all
+// items have been read or an error has halted its progress. Block, Receipts,
+// and BlockAndReceipts should no longer be called after false is returned.
+func (it *Iterator) Next() bool {
+ return it.inner.Next()
+}
+
+// Number returns the current block number the iterator will return.
+func (it *Iterator) Number() uint64 {
+ return it.inner.next - 1
+}
+
+// Error returns the error status of the iterator. It should be called before
+// reading from any of the iterator's values.
+func (it *Iterator) Error() error {
+ return it.inner.Error()
+}
+
+// Block returns the block for the iterator's current position.
+func (it *Iterator) Block() (*types.Block, error) {
+ if it.inner.Header == nil || it.inner.Body == nil {
+ return nil, fmt.Errorf("header and body must be non-nil")
+ }
+ var (
+ header types.Header
+ body types.Body
+ )
+ if err := rlp.Decode(it.inner.Header, &header); err != nil {
+ return nil, err
+ }
+ if err := rlp.Decode(it.inner.Body, &body); err != nil {
+ return nil, err
+ }
+ return types.NewBlockWithHeader(&header).WithBody(body.Transactions, body.Uncles), nil
+}
+
+// Receipts returns the receipts for the iterator's current position.
+func (it *Iterator) Receipts() (types.Receipts, error) {
+ if it.inner.Receipts == nil {
+ return nil, fmt.Errorf("receipts must be non-nil")
+ }
+ var receipts types.Receipts
+ err := rlp.Decode(it.inner.Receipts, &receipts)
+ return receipts, err
+}
+
+// BlockAndReceipts returns the block and receipts for the iterator's current
+// position.
+func (it *Iterator) BlockAndReceipts() (*types.Block, types.Receipts, error) {
+ b, err := it.Block()
+ if err != nil {
+ return nil, nil, err
+ }
+ r, err := it.Receipts()
+ if err != nil {
+ return nil, nil, err
+ }
+ return b, r, nil
+}
+
+// TotalDifficulty returns the total difficulty for the iterator's current
+// position.
+func (it *Iterator) TotalDifficulty() (*big.Int, error) {
+ td, err := io.ReadAll(it.inner.TotalDifficulty)
+ if err != nil {
+ return nil, err
+ }
+ return new(big.Int).SetBytes(reverseOrder(td)), nil
+}
+
+// RawIterator reads RLP-encoded Era1 entries.
+type RawIterator struct {
+ e *Era // backing Era1
+ next uint64 // next block to read
+ err error // last error
+
+ Header io.Reader
+ Body io.Reader
+ Receipts io.Reader
+ TotalDifficulty io.Reader
+}
+
+// NewRawIterator returns a new RawIterator instance. Next must be immediately
+// called on new iterators to load the first item.
+func NewRawIterator(e *Era) (*RawIterator, error) {
+ return &RawIterator{
+ e: e,
+ next: e.m.start,
+ }, nil
+}
+
+// Next moves the iterator to the next block entry. It returns false when all
+// items have been read or an error has halted its progress. Header, Body,
+// Receipts, TotalDifficulty will be set to nil in the case returning false or
+// finding an error and should therefore no longer be read from.
+func (it *RawIterator) Next() bool {
+ // Clear old errors.
+ it.err = nil
+ if it.e.m.start+it.e.m.count <= it.next {
+ it.clear()
+ return false
+ }
+ off, err := it.e.readOffset(it.next)
+ if err != nil {
+ // Error here means block index is corrupted, so don't
+ // continue.
+ it.clear()
+ it.err = err
+ return false
+ }
+ var n int64
+ if it.Header, n, it.err = newSnappyReader(it.e.s, TypeCompressedHeader, off); it.err != nil {
+ it.clear()
+ return true
+ }
+ off += n
+ if it.Body, n, it.err = newSnappyReader(it.e.s, TypeCompressedBody, off); it.err != nil {
+ it.clear()
+ return true
+ }
+ off += n
+ if it.Receipts, n, it.err = newSnappyReader(it.e.s, TypeCompressedReceipts, off); it.err != nil {
+ it.clear()
+ return true
+ }
+ off += n
+ if it.TotalDifficulty, _, it.err = it.e.s.ReaderAt(TypeTotalDifficulty, off); it.err != nil {
+ it.clear()
+ return true
+ }
+ it.next += 1
+ return true
+}
+
+// Number returns the current block number the iterator will return.
+func (it *RawIterator) Number() uint64 {
+ return it.next - 1
+}
+
+// Error returns the error status of the iterator. It should be called before
+// reading from any of the iterator's values.
+func (it *RawIterator) Error() error {
+ if it.err == io.EOF {
+ return nil
+ }
+ return it.err
+}
+
+// clear sets all the outputs to nil.
+func (it *RawIterator) clear() {
+ it.Header = nil
+ it.Body = nil
+ it.Receipts = nil
+ it.TotalDifficulty = nil
+}
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 4f9bfa2c40..f115e4230b 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -301,7 +301,7 @@ type PersonalAccountAPI struct {
b Backend
}
-// NewPersonalAccountAPI create a new PersonalAccountAPI.
+// NewPersonalAccountAPI creates a new PersonalAccountAPI.
func NewPersonalAccountAPI(b Backend, nonceLock *AddrLocker) *PersonalAccountAPI {
return &PersonalAccountAPI{
am: b.AccountManager(),
@@ -466,7 +466,7 @@ func (s *PersonalAccountAPI) signTransaction(ctx context.Context, args *Transact
return nil, err
}
// Set some sanity defaults and terminate on failure
- if err := args.setDefaults(ctx, s.b); err != nil {
+ if err := args.setDefaults(ctx, s.b, false); err != nil {
return nil, err
}
// Assemble the transaction and sign with the wallet
@@ -543,7 +543,7 @@ func (s *PersonalAccountAPI) SignTransaction(ctx context.Context, args Transacti
//
// The key used to calculate the signature is decrypted with the given password.
//
-// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_sign
+// https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-personal#personal-sign
func (s *PersonalAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr common.Address, passwd string) (hexutil.Bytes, error) {
// Look up the wallet containing the requested signer
account := accounts.Account{Address: addr}
@@ -571,7 +571,7 @@ func (s *PersonalAccountAPI) Sign(ctx context.Context, data hexutil.Bytes, addr
// Note, the signature must conform to the secp256k1 curve R, S and V values, where
// the V value must be 27 or 28 for legacy reasons.
//
-// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover
+// https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-personal#personal-ecrecover
func (s *PersonalAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (common.Address, error) {
if len(sig) != crypto.SignatureLength {
return common.Address{}, fmt.Errorf("signature must be %d bytes long", crypto.SignatureLength)
@@ -668,7 +668,7 @@ func (s *BlockChainAPI) GetBalance(ctx context.Context, address common.Address,
return (*hexutil.Big)(b), state.Error()
}
-// Result structs for GetProof
+// AccountResult structs for GetProof
type AccountResult struct {
Address common.Address `json:"address"`
AccountProof []string `json:"accountProof"`
@@ -1159,14 +1159,14 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S
defer cancel()
// Get a new instance of the EVM.
- msg, err := args.ToMessage(globalGasCap, header.BaseFee)
- if err != nil {
- return nil, err
- }
blockCtx := core.NewEVMBlockContext(header, NewChainContext(ctx, b), nil)
if blockOverrides != nil {
blockOverrides.Apply(&blockCtx)
}
+ msg, err := args.ToMessage(globalGasCap, blockCtx.BaseFee)
+ if err != nil {
+ return nil, err
+ }
evm := b.GetEVM(ctx, msg, state, header, &vm.Config{NoBaseFee: true}, &blockCtx)
// Wait for the context to be done and cancel the evm. Even if the
@@ -1424,7 +1424,7 @@ func (s *BlockChainAPI) replay(ctx context.Context, block *types.Block, accounts
// GetDiffAccountsWithScope returns detailed changes of some interested accounts in a specific block number.
func (s *BlockChainAPI) GetDiffAccountsWithScope(ctx context.Context, blockNr rpc.BlockNumber, accounts []common.Address) (*types.DiffAccountsInBlock, error) {
if s.b.Chain() == nil {
- return nil, fmt.Errorf("blockchain not support get diff accounts")
+ return nil, errors.New("blockchain not support get diff accounts")
}
block, err := s.b.BlockByNumber(ctx, blockNr)
@@ -1738,14 +1738,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
if db == nil || err != nil {
return nil, 0, nil, err
}
- // If the gas amount is not set, default to RPC gas cap.
- if args.Gas == nil {
- tmp := hexutil.Uint64(b.RPCGasCap())
- args.Gas = &tmp
- }
// Ensure any missing fields are filled, extract the recipient and input data
- if err := args.setDefaults(ctx, b); err != nil {
+ if err := args.setDefaults(ctx, b, true); err != nil {
return nil, 0, nil, err
}
var to common.Address
@@ -1942,7 +1937,7 @@ func (s *TransactionAPI) GetTransactionReceiptsByBlockNumber(ctx context.Context
}
txs := block.Transactions()
if len(txs) != len(receipts) {
- return nil, fmt.Errorf("txs length doesn't equal to receipts' length")
+ return nil, errors.New("txs length doesn't equal to receipts' length")
}
txReceipts := make([]map[string]interface{}, 0, len(txs))
@@ -2172,7 +2167,7 @@ func (s *TransactionAPI) SendTransaction(ctx context.Context, args TransactionAr
}
// Set some sanity defaults and terminate on failure
- if err := args.setDefaults(ctx, s.b); err != nil {
+ if err := args.setDefaults(ctx, s.b, false); err != nil {
return common.Hash{}, err
}
// Assemble the transaction and sign with the wallet
@@ -2189,13 +2184,14 @@ func (s *TransactionAPI) SendTransaction(ctx context.Context, args TransactionAr
// on a given unsigned transaction, and returns it to the caller for further
// processing (signing + broadcast).
func (s *TransactionAPI) FillTransaction(ctx context.Context, args TransactionArgs) (*SignTransactionResult, error) {
+ args.blobSidecarAllowed = true
+
// Set some sanity defaults and terminate on failure
- if err := args.setDefaults(ctx, s.b); err != nil {
+ if err := args.setDefaults(ctx, s.b, false); err != nil {
return nil, err
}
// Assemble the transaction and obtain rlp
tx := args.toTransaction()
- // TODO(s1na): fill in blob proofs, commitments
data, err := tx.MarshalBinary()
if err != nil {
return nil, err
@@ -2278,7 +2274,7 @@ func (s *TransactionAPI) SignTransaction(ctx context.Context, args TransactionAr
if args.Nonce == nil {
return nil, errors.New("nonce not specified")
}
- if err := args.setDefaults(ctx, s.b); err != nil {
+ if err := args.setDefaults(ctx, s.b, false); err != nil {
return nil, err
}
// Before actually sign the transaction, ensure the transaction fee is reasonable.
@@ -2327,7 +2323,7 @@ func (s *TransactionAPI) Resend(ctx context.Context, sendArgs TransactionArgs, g
if sendArgs.Nonce == nil {
return common.Hash{}, errors.New("missing transaction nonce in transaction spec")
}
- if err := sendArgs.setDefaults(ctx, s.b); err != nil {
+ if err := sendArgs.setDefaults(ctx, s.b, false); err != nil {
return common.Hash{}, err
}
matchTx := sendArgs.toTransaction()
diff --git a/internal/ethapi/api_mev.go b/internal/ethapi/api_mev.go
new file mode 100644
index 0000000000..0fa92af1af
--- /dev/null
+++ b/internal/ethapi/api_mev.go
@@ -0,0 +1,111 @@
+package ethapi
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+const (
+ TransferTxGasLimit = 25000
+)
+
+// MevAPI implements the interfaces that defined in the BEP-322.
+// It offers methods for the interaction between builders and validators.
+type MevAPI struct {
+ b Backend
+}
+
+// NewMevAPI creates a new MevAPI.
+func NewMevAPI(b Backend) *MevAPI {
+ return &MevAPI{b}
+}
+
+// SendBid receives bid from the builders.
+// If mev is not running or bid is invalid, return error.
+// Otherwise, creates a builder bid for the given argument, submit it to the miner.
+func (m *MevAPI) SendBid(ctx context.Context, args *types.BidArgs) (common.Hash, error) {
+ if !m.b.MevRunning() {
+ return common.Hash{}, types.ErrMevNotRunning
+ }
+
+ if !m.b.MinerInTurn() {
+ return common.Hash{}, types.ErrMevNotInTurn
+ }
+
+ var (
+ rawBid = args.RawBid
+ currentHeader = m.b.CurrentHeader()
+ )
+
+ if rawBid == nil {
+ return common.Hash{}, types.NewInvalidBidError("rawBid should not be nil")
+ }
+
+ // only support bidding for the next block not for the future block
+ if rawBid.BlockNumber != currentHeader.Number.Uint64()+1 {
+ return common.Hash{}, types.NewInvalidBidError("stale block number or block in future")
+ }
+
+ if rawBid.ParentHash != currentHeader.Hash() {
+ return common.Hash{}, types.NewInvalidBidError(
+ fmt.Sprintf("non-aligned parent hash: %v", currentHeader.Hash()))
+ }
+
+ if rawBid.GasFee == nil || rawBid.GasFee.Cmp(common.Big0) == 0 || rawBid.GasUsed == 0 {
+ return common.Hash{}, types.NewInvalidBidError("empty gasFee or empty gasUsed")
+ }
+
+ if rawBid.BuilderFee != nil {
+ builderFee := rawBid.BuilderFee
+ if builderFee.Cmp(common.Big0) < 0 {
+ return common.Hash{}, types.NewInvalidBidError("builder fee should not be less than 0")
+ }
+
+ if builderFee.Cmp(common.Big0) == 0 {
+ if len(args.PayBidTx) != 0 || args.PayBidTxGasUsed != 0 {
+ return common.Hash{}, types.NewInvalidPayBidTxError("payBidTx should be nil when builder fee is 0")
+ }
+ }
+
+ if builderFee.Cmp(rawBid.GasFee) >= 0 {
+ return common.Hash{}, types.NewInvalidBidError("builder fee must be less than gas fee")
+ }
+
+ if builderFee.Cmp(common.Big0) > 0 {
+ // payBidTx can be nil when validator and builder take some other settlement
+
+ if args.PayBidTxGasUsed > TransferTxGasLimit {
+ return common.Hash{}, types.NewInvalidBidError(
+ fmt.Sprintf("transfer tx gas used must be no more than %v", TransferTxGasLimit))
+ }
+
+ if (len(args.PayBidTx) == 0 && args.PayBidTxGasUsed != 0) ||
+ (len(args.PayBidTx) != 0 && args.PayBidTxGasUsed == 0) {
+ return common.Hash{}, types.NewInvalidPayBidTxError("non-aligned payBidTx and payBidTxGasUsed")
+ }
+ }
+ } else {
+ if len(args.PayBidTx) != 0 || args.PayBidTxGasUsed != 0 {
+ return common.Hash{}, types.NewInvalidPayBidTxError("payBidTx should be nil when builder fee is nil")
+ }
+ }
+
+ return m.b.SendBid(ctx, args)
+}
+
+func (m *MevAPI) BestBidGasFee(_ context.Context, parentHash common.Hash) *big.Int {
+ return m.b.BestBidGasFee(parentHash)
+}
+
+func (m *MevAPI) Params() *types.MevParams {
+ return m.b.MevParams()
+}
+
+// Running returns true if mev is running
+func (m *MevAPI) Running() bool {
+ return m.b.MevRunning()
+}
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index 409a00e7d0..26c6bad7f2 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -20,6 +20,7 @@ import (
"bytes"
"context"
"crypto/ecdsa"
+ "crypto/sha256"
"encoding/json"
"errors"
"fmt"
@@ -30,6 +31,10 @@ import (
"testing"
"time"
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
@@ -45,14 +50,12 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/blocktest"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/holiman/uint256"
- "github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
)
func testTransactionMarshal(t *testing.T, tests []txData, config *params.ChainConfig) {
@@ -442,7 +445,7 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.E
}
)
accman, acc := newTestAccountManager(t)
- gspec.Alloc[acc.Address] = core.GenesisAccount{Balance: big.NewInt(params.Ether)}
+ gspec.Alloc[acc.Address] = types.Account{Balance: big.NewInt(params.Ether)}
// Generate blocks for testing
db, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, n, generator)
txlookupLimit := uint64(0)
@@ -633,6 +636,23 @@ func (b testBackend) ServiceFilter(ctx context.Context, session *bloombits.Match
panic("implement me")
}
+func (b *testBackend) MevRunning() bool { return false }
+func (b *testBackend) MevParams() *types.MevParams {
+ return &types.MevParams{}
+}
+func (b *testBackend) StartMev() {}
+func (b *testBackend) StopMev() {}
+func (b *testBackend) AddBuilder(builder common.Address, builderUrl string) error { return nil }
+func (b *testBackend) RemoveBuilder(builder common.Address) error { return nil }
+func (b *testBackend) SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error) {
+ panic("implement me")
+}
+func (b *testBackend) MinerInTurn() bool { return false }
+func (b *testBackend) BestBidGasFee(parentHash common.Hash) *big.Int {
+ // TODO: implement me
+ panic("implement me")
+}
+
func TestEstimateGas(t *testing.T) {
t.Parallel()
// Initialize test accounts
@@ -640,7 +660,7 @@ func TestEstimateGas(t *testing.T) {
accounts = newAccounts(2)
genesis = &core.Genesis{
Config: params.MergedTestChainConfig,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
accounts[0].addr: {Balance: big.NewInt(params.Ether)},
accounts[1].addr: {Balance: big.NewInt(params.Ether)},
},
@@ -797,7 +817,7 @@ func TestCall(t *testing.T) {
accounts = newAccounts(3)
genesis = &core.Genesis{
Config: params.MergedTestChainConfig,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
accounts[0].addr: {Balance: big.NewInt(params.Ether)},
accounts[1].addr: {Balance: big.NewInt(params.Ether)},
accounts[2].addr: {Balance: big.NewInt(params.Ether)},
@@ -994,7 +1014,7 @@ func TestSignTransaction(t *testing.T) {
to = crypto.PubkeyToAddress(key.PublicKey)
genesis = &core.Genesis{
Config: params.MergedTestChainConfig,
- Alloc: core.GenesisAlloc{},
+ Alloc: types.GenesisAlloc{},
}
)
b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
@@ -1032,7 +1052,7 @@ func TestSignBlobTransaction(t *testing.T) {
to = crypto.PubkeyToAddress(key.PublicKey)
genesis = &core.Genesis{
Config: params.MergedTestChainConfig,
- Alloc: core.GenesisAlloc{},
+ Alloc: types.GenesisAlloc{},
}
)
b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
@@ -1066,7 +1086,7 @@ func TestSendBlobTransaction(t *testing.T) {
to = crypto.PubkeyToAddress(key.PublicKey)
genesis = &core.Genesis{
Config: params.MergedTestChainConfig,
- Alloc: core.GenesisAlloc{},
+ Alloc: types.GenesisAlloc{},
}
)
b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
@@ -1091,6 +1111,195 @@ func TestSendBlobTransaction(t *testing.T) {
}
}
+func TestFillBlobTransaction(t *testing.T) {
+ t.Parallel()
+ // Initialize test accounts
+ var (
+ key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
+ to = crypto.PubkeyToAddress(key.PublicKey)
+ genesis = &core.Genesis{
+ Config: params.MergedTestChainConfig,
+ Alloc: types.GenesisAlloc{},
+ }
+ emptyBlob = kzg4844.Blob{}
+ emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob)
+ emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit)
+ emptyBlobHash common.Hash = kzg4844.CalcBlobHashV1(sha256.New(), &emptyBlobCommit)
+ )
+ b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) {
+ b.SetPoS()
+ })
+ api := NewTransactionAPI(b, nil)
+ type result struct {
+ Hashes []common.Hash
+ Sidecar *types.BlobTxSidecar
+ }
+ suite := []struct {
+ name string
+ args TransactionArgs
+ err string
+ want *result
+ }{
+ {
+ name: "TestInvalidParamsCombination1",
+ args: TransactionArgs{
+ From: &b.acc.Address,
+ To: &to,
+ Value: (*hexutil.Big)(big.NewInt(1)),
+ Blobs: []kzg4844.Blob{{}},
+ Proofs: []kzg4844.Proof{{}},
+ },
+ err: `blob proofs provided while commitments were not`,
+ },
+ {
+ name: "TestInvalidParamsCombination2",
+ args: TransactionArgs{
+ From: &b.acc.Address,
+ To: &to,
+ Value: (*hexutil.Big)(big.NewInt(1)),
+ Blobs: []kzg4844.Blob{{}},
+ Commitments: []kzg4844.Commitment{{}},
+ },
+ err: `blob commitments provided while proofs were not`,
+ },
+ {
+ name: "TestInvalidParamsCount1",
+ args: TransactionArgs{
+ From: &b.acc.Address,
+ To: &to,
+ Value: (*hexutil.Big)(big.NewInt(1)),
+ Blobs: []kzg4844.Blob{{}},
+ Commitments: []kzg4844.Commitment{{}, {}},
+ Proofs: []kzg4844.Proof{{}, {}},
+ },
+ err: `number of blobs and commitments mismatch (have=2, want=1)`,
+ },
+ {
+ name: "TestInvalidParamsCount2",
+ args: TransactionArgs{
+ From: &b.acc.Address,
+ To: &to,
+ Value: (*hexutil.Big)(big.NewInt(1)),
+ Blobs: []kzg4844.Blob{{}, {}},
+ Commitments: []kzg4844.Commitment{{}, {}},
+ Proofs: []kzg4844.Proof{{}},
+ },
+ err: `number of blobs and proofs mismatch (have=1, want=2)`,
+ },
+ {
+ name: "TestInvalidProofVerification",
+ args: TransactionArgs{
+ From: &b.acc.Address,
+ To: &to,
+ Value: (*hexutil.Big)(big.NewInt(1)),
+ Blobs: []kzg4844.Blob{{}, {}},
+ Commitments: []kzg4844.Commitment{{}, {}},
+ Proofs: []kzg4844.Proof{{}, {}},
+ },
+ err: `failed to verify blob proof: short buffer`,
+ },
+ {
+ name: "TestGenerateBlobHashes",
+ args: TransactionArgs{
+ From: &b.acc.Address,
+ To: &to,
+ Value: (*hexutil.Big)(big.NewInt(1)),
+ Blobs: []kzg4844.Blob{emptyBlob},
+ Commitments: []kzg4844.Commitment{emptyBlobCommit},
+ Proofs: []kzg4844.Proof{emptyBlobProof},
+ },
+ want: &result{
+ Hashes: []common.Hash{emptyBlobHash},
+ Sidecar: &types.BlobTxSidecar{
+ Blobs: []kzg4844.Blob{emptyBlob},
+ Commitments: []kzg4844.Commitment{emptyBlobCommit},
+ Proofs: []kzg4844.Proof{emptyBlobProof},
+ },
+ },
+ },
+ {
+ name: "TestValidBlobHashes",
+ args: TransactionArgs{
+ From: &b.acc.Address,
+ To: &to,
+ Value: (*hexutil.Big)(big.NewInt(1)),
+ BlobHashes: []common.Hash{emptyBlobHash},
+ Blobs: []kzg4844.Blob{emptyBlob},
+ Commitments: []kzg4844.Commitment{emptyBlobCommit},
+ Proofs: []kzg4844.Proof{emptyBlobProof},
+ },
+ want: &result{
+ Hashes: []common.Hash{emptyBlobHash},
+ Sidecar: &types.BlobTxSidecar{
+ Blobs: []kzg4844.Blob{emptyBlob},
+ Commitments: []kzg4844.Commitment{emptyBlobCommit},
+ Proofs: []kzg4844.Proof{emptyBlobProof},
+ },
+ },
+ },
+ {
+ name: "TestInvalidBlobHashes",
+ args: TransactionArgs{
+ From: &b.acc.Address,
+ To: &to,
+ Value: (*hexutil.Big)(big.NewInt(1)),
+ BlobHashes: []common.Hash{{0x01, 0x22}},
+ Blobs: []kzg4844.Blob{emptyBlob},
+ Commitments: []kzg4844.Commitment{emptyBlobCommit},
+ Proofs: []kzg4844.Proof{emptyBlobProof},
+ },
+ err: fmt.Sprintf("blob hash verification failed (have=%s, want=%s)", common.Hash{0x01, 0x22}, emptyBlobHash),
+ },
+ {
+ name: "TestGenerateBlobProofs",
+ args: TransactionArgs{
+ From: &b.acc.Address,
+ To: &to,
+ Value: (*hexutil.Big)(big.NewInt(1)),
+ Blobs: []kzg4844.Blob{emptyBlob},
+ },
+ want: &result{
+ Hashes: []common.Hash{emptyBlobHash},
+ Sidecar: &types.BlobTxSidecar{
+ Blobs: []kzg4844.Blob{emptyBlob},
+ Commitments: []kzg4844.Commitment{emptyBlobCommit},
+ Proofs: []kzg4844.Proof{emptyBlobProof},
+ },
+ },
+ },
+ }
+ for _, tc := range suite {
+ t.Run(tc.name, func(t *testing.T) {
+ res, err := api.FillTransaction(context.Background(), tc.args)
+ if len(tc.err) > 0 {
+ if err == nil {
+ t.Fatalf("missing error. want: %s", tc.err)
+ } else if err != nil && err.Error() != tc.err {
+ t.Fatalf("error mismatch. want: %s, have: %s", tc.err, err.Error())
+ }
+ return
+ }
+ if err != nil && len(tc.err) == 0 {
+ t.Fatalf("expected no error. have: %s", err)
+ }
+ if res == nil {
+ t.Fatal("result missing")
+ }
+ want, err := json.Marshal(tc.want)
+ if err != nil {
+ t.Fatalf("failed to encode expected: %v", err)
+ }
+ have, err := json.Marshal(result{Hashes: res.Tx.BlobHashes(), Sidecar: res.Tx.BlobTxSidecar()})
+ if err != nil {
+ t.Fatalf("failed to encode computed sidecar: %v", err)
+ }
+ if !bytes.Equal(have, want) {
+ t.Errorf("blob sidecar mismatch. Have: %s, want: %s", have, want)
+ }
+ })
+ }
+}
+
func argsFromTransaction(tx *types.Transaction, from common.Address) TransactionArgs {
var (
gas = tx.Gas()
@@ -1359,7 +1568,7 @@ func TestRPCGetBlockOrHeader(t *testing.T) {
acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey)
genesis = &core.Genesis{
Config: params.TestChainConfig,
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
acc1Addr: {Balance: big.NewInt(params.Ether)},
acc2Addr: {Balance: big.NewInt(params.Ether)},
},
@@ -1614,7 +1823,7 @@ func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Ha
Config: &config,
ExcessBlobGas: new(uint64),
BlobGasUsed: new(uint64),
- Alloc: core.GenesisAlloc{
+ Alloc: types.GenesisAlloc{
acc1Addr: {Balance: big.NewInt(params.Ether)},
acc2Addr: {Balance: big.NewInt(params.Ether)},
// // SPDX-License-Identifier: GPL-3.0
@@ -1639,6 +1848,7 @@ func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Ha
tx *types.Transaction
err error
)
+ b.SetPoS()
switch i {
case 0:
// transfer 1000wei
@@ -1687,7 +1897,6 @@ func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Ha
b.AddTx(tx)
txHashes[i] = tx.Hash()
}
- b.SetPoS()
})
return backend, txHashes
}
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index a304e5d9ec..0f37e4f0f5 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -101,6 +101,25 @@ type Backend interface {
ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
SubscribeFinalizedHeaderEvent(ch chan<- core.FinalizedHeaderEvent) event.Subscription
SubscribeNewVoteEvent(chan<- core.NewVoteEvent) event.Subscription
+
+ // MevRunning returns true if mev is running.
+ MevRunning() bool
+ // MevParams returns the static params of mev
+ MevParams() *types.MevParams
+ // StartMev starts mev
+ StartMev()
+ // StopMev stops mev
+ StopMev()
+ // AddBuilder adds a builder to the bid simulator.
+ AddBuilder(builder common.Address, builderUrl string) error
+ // RemoveBuilder removes a builder from the bid simulator.
+ RemoveBuilder(builder common.Address) error
+ // SendBid receives bid from the builders.
+ SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error)
+ // BestBidGasFee returns the gas fee of the best bid for the given parent hash.
+ BestBidGasFee(parentHash common.Hash) *big.Int
+ // MinerInTurn returns true if the validator is in turn to propose the block.
+ MinerInTurn() bool
}
func GetAPIs(apiBackend Backend) []rpc.API {
@@ -127,6 +146,9 @@ func GetAPIs(apiBackend Backend) []rpc.API {
}, {
Namespace: "personal",
Service: NewPersonalAccountAPI(apiBackend, nonceLock),
+ }, {
+ Namespace: "mev",
+ Service: NewMevAPI(apiBackend),
},
}
}
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index 333011d71f..b492a6b482 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -19,6 +19,7 @@ package ethapi
import (
"bytes"
"context"
+ "crypto/sha256"
"errors"
"fmt"
"math/big"
@@ -29,11 +30,17 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/holiman/uint256"
)
+var (
+ maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob
+)
+
// TransactionArgs represents the arguments to construct a new transaction
// or a message call.
type TransactionArgs struct {
@@ -56,9 +63,17 @@ type TransactionArgs struct {
AccessList *types.AccessList `json:"accessList,omitempty"`
ChainID *hexutil.Big `json:"chainId,omitempty"`
- // Introduced by EIP-4844.
+ // For BlobTxType
BlobFeeCap *hexutil.Big `json:"maxFeePerBlobGas"`
BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"`
+
+ // For BlobTxType transactions with blob sidecar
+ Blobs []kzg4844.Blob `json:"blobs"`
+ Commitments []kzg4844.Commitment `json:"commitments"`
+ Proofs []kzg4844.Proof `json:"proofs"`
+
+ // This configures whether blobs are allowed to be passed.
+ blobSidecarAllowed bool
}
// from retrieves the transaction sender address.
@@ -81,10 +96,14 @@ func (args *TransactionArgs) data() []byte {
}
// setDefaults fills in default values for unspecified tx fields.
-func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
+func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend, skipGasEstimation bool) error {
+ if err := args.setBlobTxSidecar(ctx, b); err != nil {
+ return err
+ }
if err := args.setFeeDefaults(ctx, b); err != nil {
return err
}
+
if args.Value == nil {
args.Value = new(hexutil.Big)
}
@@ -98,38 +117,58 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) {
return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`)
}
- if args.BlobHashes != nil && args.To == nil {
- return errors.New(`blob transactions cannot have the form of a create transaction`)
- }
+
+ // BlobTx fields
if args.BlobHashes != nil && len(args.BlobHashes) == 0 {
return errors.New(`need at least 1 blob for a blob transaction`)
}
- if args.To == nil && len(args.data()) == 0 {
- return errors.New(`contract creation without any data provided`)
+ if args.BlobHashes != nil && len(args.BlobHashes) > maxBlobsPerTransaction {
+ return fmt.Errorf(`too many blobs in transaction (have=%d, max=%d)`, len(args.BlobHashes), maxBlobsPerTransaction)
}
- // Estimate the gas usage if necessary.
- if args.Gas == nil {
- // These fields are immutable during the estimation, safe to
- // pass the pointer directly.
- data := args.data()
- callArgs := TransactionArgs{
- From: args.From,
- To: args.To,
- GasPrice: args.GasPrice,
- MaxFeePerGas: args.MaxFeePerGas,
- MaxPriorityFeePerGas: args.MaxPriorityFeePerGas,
- Value: args.Value,
- Data: (*hexutil.Bytes)(&data),
- AccessList: args.AccessList,
+
+ // create check
+ if args.To == nil {
+ if args.BlobHashes != nil {
+ return errors.New(`missing "to" in blob transaction`)
}
- latestBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
- estimated, err := DoEstimateGas(ctx, b, callArgs, latestBlockNr, nil, b.RPCGasCap())
- if err != nil {
- return err
+ if len(args.data()) == 0 {
+ return errors.New(`contract creation without any data provided`)
+ }
+ }
+
+ if args.Gas == nil {
+ if skipGasEstimation { // Skip gas usage estimation if a precise gas limit is not critical, e.g., in non-transaction calls.
+ gas := hexutil.Uint64(b.RPCGasCap())
+ if gas == 0 {
+ gas = hexutil.Uint64(math.MaxUint64 / 2)
+ }
+ args.Gas = &gas
+ } else { // Estimate the gas usage otherwise.
+ // These fields are immutable during the estimation, safe to
+ // pass the pointer directly.
+ data := args.data()
+ callArgs := TransactionArgs{
+ From: args.From,
+ To: args.To,
+ GasPrice: args.GasPrice,
+ MaxFeePerGas: args.MaxFeePerGas,
+ MaxPriorityFeePerGas: args.MaxPriorityFeePerGas,
+ Value: args.Value,
+ Data: (*hexutil.Bytes)(&data),
+ AccessList: args.AccessList,
+ BlobFeeCap: args.BlobFeeCap,
+ BlobHashes: args.BlobHashes,
+ }
+ latestBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)
+ estimated, err := DoEstimateGas(ctx, b, callArgs, latestBlockNr, nil, b.RPCGasCap())
+ if err != nil {
+ return err
+ }
+ args.Gas = &estimated
+ log.Trace("Estimate gas usage automatically", "gas", args.Gas)
}
- args.Gas = &estimated
- log.Trace("Estimate gas usage automatically", "gas", args.Gas)
}
+
// If chain id is provided, ensure it matches the local chain id. Otherwise, set the local
// chain id as the default.
want := b.ChainConfig().ChainID
@@ -145,6 +184,14 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error {
// setFeeDefaults fills in default fee values for unspecified tx fields.
func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) error {
+ head := b.CurrentHeader()
+ // Sanity check the EIP-4844 fee parameters.
+ if args.BlobFeeCap != nil && args.BlobFeeCap.ToInt().Sign() == 0 {
+ return errors.New("maxFeePerBlobGas, if specified, must be non-zero")
+ }
+ if err := args.setCancunFeeDefaults(ctx, head, b); err != nil {
+ return err
+ }
// If both gasPrice and at least one of the EIP-1559 fee parameters are specified, error.
if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) {
return errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
@@ -154,7 +201,6 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
// other tx values. See https://github.com/ethereum/go-ethereum/pull/23274
// for more information.
eip1559ParamsSet := args.MaxFeePerGas != nil && args.MaxPriorityFeePerGas != nil
-
// Sanity check the EIP-1559 fee parameters if present.
if args.GasPrice == nil && eip1559ParamsSet {
if args.MaxFeePerGas.ToInt().Sign() == 0 {
@@ -165,12 +211,8 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
}
return nil // No need to set anything, user already set MaxFeePerGas and MaxPriorityFeePerGas
}
- // Sanity check the EIP-4844 fee parameters.
- if args.BlobFeeCap != nil && args.BlobFeeCap.ToInt().Sign() == 0 {
- return errors.New("maxFeePerBlobGas must be non-zero")
- }
+
// Sanity check the non-EIP-1559 fee parameters.
- head := b.CurrentHeader()
isLondon := b.ChainConfig().IsLondon(head.Number)
if args.GasPrice != nil && !eip1559ParamsSet {
// Zero gas-price is not allowed after London fork
@@ -181,21 +223,14 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
}
// Now attempt to fill in default value depending on whether London is active or not.
- if b.ChainConfig().IsCancun(head.Number, head.Time) {
- if err := args.setCancunFeeDefaults(ctx, head, b); err != nil {
- return err
- }
- } else if isLondon {
- if args.BlobFeeCap != nil {
- return errors.New("maxFeePerBlobGas is not valid before Cancun is active")
- }
+ if isLondon {
// London is active, set maxPriorityFeePerGas and maxFeePerGas.
if err := args.setLondonFeeDefaults(ctx, head, b); err != nil {
return err
}
} else {
- if args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil || args.BlobFeeCap != nil {
- return errors.New("maxFeePerGas and maxPriorityFeePerGas and maxFeePerBlobGas are not valid before London is active")
+ if args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil {
+ return errors.New("maxFeePerGas and maxPriorityFeePerGas are not valid before London is active")
}
// London not active, set gas price.
price, err := b.SuggestGasTipCap(ctx)
@@ -211,15 +246,19 @@ func (args *TransactionArgs) setFeeDefaults(ctx context.Context, b Backend) erro
func (args *TransactionArgs) setCancunFeeDefaults(ctx context.Context, head *types.Header, b Backend) error {
// Set maxFeePerBlobGas if it is missing.
if args.BlobHashes != nil && args.BlobFeeCap == nil {
+ var excessBlobGas uint64
+ if head.ExcessBlobGas != nil {
+ excessBlobGas = *head.ExcessBlobGas
+ }
// ExcessBlobGas must be set for a Cancun block.
- blobBaseFee := eip4844.CalcBlobFee(*head.ExcessBlobGas)
+ blobBaseFee := eip4844.CalcBlobFee(excessBlobGas)
// Set the max fee to be 2 times larger than the previous block's blob base fee.
// The additional slack allows the tx to not become invalidated if the base
// fee is rising.
val := new(big.Int).Mul(blobBaseFee, big.NewInt(2))
args.BlobFeeCap = (*hexutil.Big)(val)
}
- return args.setLondonFeeDefaults(ctx, head, b)
+ return nil
}
// setLondonFeeDefaults fills in reasonable default fee values for unspecified fields.
@@ -250,6 +289,81 @@ func (args *TransactionArgs) setLondonFeeDefaults(ctx context.Context, head *typ
return nil
}
+// setBlobTxSidecar validates the supplied blobs and fills in the blob tx sidecar (commitments, proofs, hashes).
+func (args *TransactionArgs) setBlobTxSidecar(ctx context.Context, b Backend) error {
+ // No blobs, we're done.
+ if args.Blobs == nil {
+ return nil
+ }
+
+ // Passing blobs is not allowed in all contexts, only in specific methods.
+ if !args.blobSidecarAllowed {
+ return errors.New(`"blobs" is not supported for this RPC method`)
+ }
+
+ n := len(args.Blobs)
+ // Assume user provides either only blobs (w/o hashes), or
+ // blobs together with commitments and proofs.
+ if args.Commitments == nil && args.Proofs != nil {
+ return errors.New(`blob proofs provided while commitments were not`)
+ } else if args.Commitments != nil && args.Proofs == nil {
+ return errors.New(`blob commitments provided while proofs were not`)
+ }
+
+ // len(blobs) == len(commitments) == len(proofs) == len(hashes)
+ if args.Commitments != nil && len(args.Commitments) != n {
+ return fmt.Errorf("number of blobs and commitments mismatch (have=%d, want=%d)", len(args.Commitments), n)
+ }
+ if args.Proofs != nil && len(args.Proofs) != n {
+ return fmt.Errorf("number of blobs and proofs mismatch (have=%d, want=%d)", len(args.Proofs), n)
+ }
+ if args.BlobHashes != nil && len(args.BlobHashes) != n {
+ return fmt.Errorf("number of blobs and hashes mismatch (have=%d, want=%d)", len(args.BlobHashes), n)
+ }
+
+ if args.Commitments == nil {
+ // Generate commitment and proof.
+ commitments := make([]kzg4844.Commitment, n)
+ proofs := make([]kzg4844.Proof, n)
+ for i, b := range args.Blobs {
+ c, err := kzg4844.BlobToCommitment(b)
+ if err != nil {
+ return fmt.Errorf("blobs[%d]: error computing commitment: %v", i, err)
+ }
+ commitments[i] = c
+ p, err := kzg4844.ComputeBlobProof(b, c)
+ if err != nil {
+ return fmt.Errorf("blobs[%d]: error computing proof: %v", i, err)
+ }
+ proofs[i] = p
+ }
+ args.Commitments = commitments
+ args.Proofs = proofs
+ } else {
+ for i, b := range args.Blobs {
+ if err := kzg4844.VerifyBlobProof(b, args.Commitments[i], args.Proofs[i]); err != nil {
+ return fmt.Errorf("failed to verify blob proof: %v", err)
+ }
+ }
+ }
+
+ hashes := make([]common.Hash, n)
+ hasher := sha256.New()
+ for i, c := range args.Commitments {
+ hashes[i] = kzg4844.CalcBlobHashV1(hasher, &c)
+ }
+ if args.BlobHashes != nil {
+ for i, h := range hashes {
+ if h != args.BlobHashes[i] {
+ return fmt.Errorf("blob hash verification failed (have=%s, want=%s)", args.BlobHashes[i], h)
+ }
+ }
+ } else {
+ args.BlobHashes = hashes
+ }
+ return nil
+}
+
// ToMessage converts the transaction arguments to the Message type used by the
// core evm. This method is used in calls and traces that do not require a real
// live transaction.
@@ -363,6 +477,14 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
BlobHashes: args.BlobHashes,
BlobFeeCap: uint256.MustFromBig((*big.Int)(args.BlobFeeCap)),
}
+ if args.Blobs != nil {
+ data.(*types.BlobTx).Sidecar = &types.BlobTxSidecar{
+ Blobs: args.Blobs,
+ Commitments: args.Commitments,
+ Proofs: args.Proofs,
+ }
+ }
+
case args.MaxFeePerGas != nil:
al := types.AccessList{}
if args.AccessList != nil {
@@ -379,6 +501,7 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
Data: args.data(),
AccessList: al,
}
+
case args.AccessList != nil:
data = &types.AccessListTx{
To: args.To,
@@ -390,6 +513,7 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
Data: args.data(),
AccessList: *args.AccessList,
}
+
default:
data = &types.LegacyTx{
To: args.To,
@@ -403,12 +527,6 @@ func (args *TransactionArgs) toTransaction() *types.Transaction {
return types.NewTx(data)
}
-// ToTransaction converts the arguments to a transaction.
-// This assumes that setDefaults has been called.
-func (args *TransactionArgs) ToTransaction() *types.Transaction {
- return args.toTransaction()
-}
-
// IsEIP4844 returns an indicator if the args contains EIP4844 fields.
func (args *TransactionArgs) IsEIP4844() bool {
return args.BlobHashes != nil || args.BlobFeeCap != nil
diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go
index 6c3402ec53..f08fc2b059 100644
--- a/internal/ethapi/transaction_args_test.go
+++ b/internal/ethapi/transaction_args_test.go
@@ -153,14 +153,14 @@ func TestSetFeeDefaults(t *testing.T) {
"legacy",
&TransactionArgs{MaxFeePerGas: maxFee},
nil,
- errors.New("maxFeePerGas and maxPriorityFeePerGas and maxFeePerBlobGas are not valid before London is active"),
+ errors.New("maxFeePerGas and maxPriorityFeePerGas are not valid before London is active"),
},
{
"dynamic fee tx pre-London, priorityFee set",
"legacy",
&TransactionArgs{MaxPriorityFeePerGas: fortytwo},
nil,
- errors.New("maxFeePerGas and maxPriorityFeePerGas and maxFeePerBlobGas are not valid before London is active"),
+ errors.New("maxFeePerGas and maxPriorityFeePerGas are not valid before London is active"),
},
{
"dynamic fee tx, maxFee < priorityFee",
@@ -207,20 +207,6 @@ func TestSetFeeDefaults(t *testing.T) {
errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified"),
},
// EIP-4844
- {
- "set maxFeePerBlobGas pre cancun",
- "london",
- &TransactionArgs{BlobFeeCap: fortytwo},
- nil,
- errors.New("maxFeePerBlobGas is not valid before Cancun is active"),
- },
- {
- "set maxFeePerBlobGas pre london",
- "legacy",
- &TransactionArgs{BlobFeeCap: fortytwo},
- nil,
- errors.New("maxFeePerGas and maxPriorityFeePerGas and maxFeePerBlobGas are not valid before London is active"),
- },
{
"set gas price and maxFee for blob transaction",
"cancun",
@@ -235,6 +221,13 @@ func TestSetFeeDefaults(t *testing.T) {
&TransactionArgs{BlobHashes: []common.Hash{}, BlobFeeCap: (*hexutil.Big)(big.NewInt(4)), MaxFeePerGas: maxFee, MaxPriorityFeePerGas: fortytwo},
nil,
},
+ {
+ "fill maxFeePerBlobGas when dynamic fees are set",
+ "cancun",
+ &TransactionArgs{BlobHashes: []common.Hash{}, MaxFeePerGas: maxFee, MaxPriorityFeePerGas: fortytwo},
+ &TransactionArgs{BlobHashes: []common.Hash{}, BlobFeeCap: (*hexutil.Big)(big.NewInt(4)), MaxFeePerGas: maxFee, MaxPriorityFeePerGas: fortytwo},
+ nil,
+ },
}
ctx := context.Background()
@@ -244,11 +237,16 @@ func TestSetFeeDefaults(t *testing.T) {
}
got := test.in
err := got.setFeeDefaults(ctx, b)
- if err != nil && err.Error() == test.err.Error() {
- // Test threw expected error.
+ if err != nil {
+ if test.err == nil {
+ t.Fatalf("test %d (%s): unexpected error: %s", i, test.name, err)
+ } else if err.Error() != test.err.Error() {
+ t.Fatalf("test %d (%s): unexpected error: (got: %s, want: %s)", i, test.name, err, test.err)
+ }
+ // Matching error.
continue
- } else if err != nil {
- t.Fatalf("test %d (%s): unexpected error: %s", i, test.name, err)
+ } else if test.err != nil {
+ t.Fatalf("test %d (%s): expected error: %s", i, test.name, test.err)
}
if !reflect.DeepEqual(got, test.want) {
t.Fatalf("test %d (%s): did not fill defaults as expected: (got: %v, want: %v)", i, test.name, got, test.want)
@@ -414,3 +412,19 @@ func (b *backendMock) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent)
}
func (b *backendMock) Engine() consensus.Engine { return nil }
+
+func (b *backendMock) MevRunning() bool { return false }
+func (b *backendMock) MevParams() *types.MevParams {
+ return &types.MevParams{}
+}
+func (b *backendMock) StartMev() {}
+func (b *backendMock) StopMev() {}
+func (b *backendMock) AddBuilder(builder common.Address, builderUrl string) error { return nil }
+func (b *backendMock) RemoveBuilder(builder common.Address) error { return nil }
+func (b *backendMock) SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error) {
+ panic("implement me")
+}
+func (b *backendMock) MinerInTurn() bool { return false }
+func (b *backendMock) BestBidGasFee(parentHash common.Hash) *big.Int {
+ panic("implement me")
+}
diff --git a/internal/flags/flags.go b/internal/flags/flags.go
index 69e9743556..bf62c53adf 100644
--- a/internal/flags/flags.go
+++ b/internal/flags/flags.go
@@ -256,7 +256,8 @@ type BigFlag struct {
Hidden bool
HasBeenSet bool
- Value *big.Int
+ Value *big.Int
+ defaultValue *big.Int
Aliases []string
EnvVars []string
@@ -269,6 +270,10 @@ func (f *BigFlag) IsSet() bool { return f.HasBeenSet }
func (f *BigFlag) String() string { return cli.FlagStringer(f) }
func (f *BigFlag) Apply(set *flag.FlagSet) error {
+ // Set default value so that environment wont be able to overwrite it
+ if f.Value != nil {
+ f.defaultValue = new(big.Int).Set(f.Value)
+ }
for _, envVar := range f.EnvVars {
envVar = strings.TrimSpace(envVar)
if value, found := syscall.Getenv(envVar); found {
@@ -283,7 +288,6 @@ func (f *BigFlag) Apply(set *flag.FlagSet) error {
f.Value = new(big.Int)
set.Var((*bigValue)(f.Value), f.Name, f.Usage)
})
-
return nil
}
@@ -310,7 +314,7 @@ func (f *BigFlag) GetDefaultText() string {
if f.DefaultText != "" {
return f.DefaultText
}
- return f.GetValue()
+ return f.defaultValue.String()
}
// bigValue turns *big.Int into a flag.Value
diff --git a/internal/flags/helpers.go b/internal/flags/helpers.go
index 369a931e8a..0112724fa1 100644
--- a/internal/flags/helpers.go
+++ b/internal/flags/helpers.go
@@ -115,7 +115,7 @@ func doMigrateFlags(ctx *cli.Context) {
for _, parent := range ctx.Lineage()[1:] {
if parent.IsSet(name) {
// When iterating across the lineage, we will be served both
- // the 'canon' and alias formats of all commmands. In most cases,
+ // the 'canon' and alias formats of all commands. In most cases,
// it's fine to set it in the ctx multiple times (one for each
// name), however, the Slice-flags are not fine.
// The slice-flags accumulate, so if we set it once as
diff --git a/internal/jsre/deps/web3.js b/internal/jsre/deps/web3.js
index 6c7270cfb8..7afe766c1f 100644
--- a/internal/jsre/deps/web3.js
+++ b/internal/jsre/deps/web3.js
@@ -2031,7 +2031,7 @@ var fromAscii = function(str) {
*
* @method transformToFullName
* @param {Object} json-abi
- * @return {String} full fnction/event name
+ * @return {String} full function/event name
*/
var transformToFullName = function (json) {
if (json.name.indexOf('(') !== -1) {
@@ -2361,7 +2361,7 @@ var isFunction = function (object) {
};
/**
- * Returns true if object is Objet, otherwise false
+ * Returns true if object is Object, otherwise false
*
* @method isObject
* @param {Object}
@@ -2757,7 +2757,7 @@ var Batch = function (web3) {
* Should be called to add create new request to batch request
*
* @method add
- * @param {Object} jsonrpc requet object
+ * @param {Object} jsonrpc request object
*/
Batch.prototype.add = function (request) {
this.requests.push(request);
@@ -4593,7 +4593,7 @@ Iban.createIndirect = function (options) {
};
/**
- * Thos method should be used to check if given string is valid iban object
+ * This method should be used to check if given string is valid iban object
*
* @method isValid
* @param {String} iban string
@@ -6774,7 +6774,7 @@ var exchangeAbi = require('../contracts/SmartExchange.json');
* @method transfer
* @param {String} from
* @param {String} to iban
- * @param {Value} value to be tranfered
+ * @param {Value} value to be transferred
* @param {Function} callback, callback
*/
var transfer = function (eth, from, to, value, callback) {
@@ -6804,7 +6804,7 @@ var transfer = function (eth, from, to, value, callback) {
* @method transferToAddress
* @param {String} from
* @param {String} to
- * @param {Value} value to be tranfered
+ * @param {Value} value to be transferred
* @param {Function} callback, callback
*/
var transferToAddress = function (eth, from, to, value, callback) {
@@ -7158,7 +7158,7 @@ module.exports = transfer;
/**
* Initializes a newly created cipher.
*
- * @param {number} xformMode Either the encryption or decryption transormation mode constant.
+ * @param {number} xformMode Either the encryption or decryption transformation mode constant.
* @param {WordArray} key The key.
* @param {Object} cfg (Optional) The configuration options to use for this operation.
*
@@ -9512,7 +9512,7 @@ module.exports = transfer;
var M_offset_14 = M[offset + 14];
var M_offset_15 = M[offset + 15];
- // Working varialbes
+ // Working variables
var a = H[0];
var b = H[1];
var c = H[2];
diff --git a/log/handler_glog.go b/log/handler_glog.go
index fb1e03c5b5..f51bae2a4a 100644
--- a/log/handler_glog.go
+++ b/log/handler_glog.go
@@ -192,7 +192,7 @@ func (h *GlogHandler) Handle(_ context.Context, r slog.Record) error {
frame, _ := fs.Next()
for _, rule := range h.patterns {
- if rule.pattern.MatchString(fmt.Sprintf("%+s", frame.File)) {
+ if rule.pattern.MatchString(fmt.Sprintf("%+s", frame.File)) {
h.siteCache[r.PC], lvl, ok = rule.level, rule.level, true
}
}
diff --git a/log/logger_test.go b/log/logger_test.go
index 6d706c6cc7..8be8509ef5 100644
--- a/log/logger_test.go
+++ b/log/logger_test.go
@@ -2,6 +2,7 @@ package log
import (
"bytes"
+ "errors"
"fmt"
"io"
"math/big"
@@ -77,7 +78,7 @@ func benchmarkLogger(b *testing.B, l Logger) {
tt = time.Now()
bigint = big.NewInt(100)
nilbig *big.Int
- err = fmt.Errorf("Oh nooes it's crap")
+ err = errors.New("Oh nooes it's crap")
)
b.ReportAllocs()
b.ResetTimer()
@@ -106,7 +107,7 @@ func TestLoggerOutput(t *testing.T) {
tt = time.Time{}
bigint = big.NewInt(100)
nilbig *big.Int
- err = fmt.Errorf("Oh nooes it's crap")
+ err = errors.New("Oh nooes it's crap")
smallUint = uint256.NewInt(500_000)
bigUint = &uint256.Int{0xff, 0xff, 0xff, 0xff}
)
diff --git a/metrics/counter.go b/metrics/counter.go
index cb81599c21..dbe8e16a90 100644
--- a/metrics/counter.go
+++ b/metrics/counter.go
@@ -8,7 +8,7 @@ type CounterSnapshot interface {
Count() int64
}
-// Counters hold an int64 value that can be incremented and decremented.
+// Counter holds an int64 value that can be incremented and decremented.
type Counter interface {
Clear()
Dec(int64)
diff --git a/metrics/disk.go b/metrics/disk.go
index 25142d2ad1..6e014e16d0 100644
--- a/metrics/disk.go
+++ b/metrics/disk.go
@@ -18,8 +18,10 @@ package metrics
// DiskStats is the per process disk io stats.
type DiskStats struct {
- ReadCount int64 // Number of read operations executed
- ReadBytes int64 // Total number of bytes read
- WriteCount int64 // Number of write operations executed
- WriteBytes int64 // Total number of byte written
+ ReadCount int64 // Number of read operations executed
+ ReadBytes int64 // Total number of bytes read (include disk cache)
+ WriteCount int64 // Number of write operations executed
+ WriteBytes int64 // Total number of byte written
+ ReadIOBytes int64 // Total number of io bytes read
+ WriteIOBytes int64 // Total number of io bytes written
}
diff --git a/metrics/disk_linux.go b/metrics/disk_linux.go
index 8d610cd674..1b3a99b2b3 100644
--- a/metrics/disk_linux.go
+++ b/metrics/disk_linux.go
@@ -67,6 +67,10 @@ func ReadDiskStats(stats *DiskStats) error {
stats.ReadBytes = value
case "wchar":
stats.WriteBytes = value
+ case "read_bytes":
+ stats.ReadIOBytes = value
+ case "write_bytes":
+ stats.WriteIOBytes = value
}
}
}
diff --git a/metrics/gauge.go b/metrics/gauge.go
index 68f8f11abc..5933df3107 100644
--- a/metrics/gauge.go
+++ b/metrics/gauge.go
@@ -2,12 +2,12 @@ package metrics
import "sync/atomic"
-// gaugeSnapshot contains a readonly int64.
+// GaugeSnapshot contains a readonly int64.
type GaugeSnapshot interface {
Value() int64
}
-// Gauges hold an int64 value that can be set arbitrarily.
+// Gauge holds an int64 value that can be set arbitrarily.
type Gauge interface {
Snapshot() GaugeSnapshot
Update(int64)
@@ -74,7 +74,7 @@ func (g *StandardGauge) Update(v int64) {
g.value.Store(v)
}
-// Update updates the gauge's value if v is larger then the current valie.
+// UpdateIfGt updates the gauge's value if v is larger than the current value.
func (g *StandardGauge) UpdateIfGt(v int64) {
for {
exist := g.value.Load()
diff --git a/metrics/gauge_float64.go b/metrics/gauge_float64.go
index 967f2bc60e..c1c3c6b6e6 100644
--- a/metrics/gauge_float64.go
+++ b/metrics/gauge_float64.go
@@ -48,7 +48,7 @@ type gaugeFloat64Snapshot float64
// Value returns the value at the time the snapshot was taken.
func (g gaugeFloat64Snapshot) Value() float64 { return float64(g) }
-// NilGauge is a no-op Gauge.
+// NilGaugeFloat64 is a no-op Gauge.
type NilGaugeFloat64 struct{}
func (NilGaugeFloat64) Snapshot() GaugeFloat64Snapshot { return NilGaugeFloat64{} }
diff --git a/metrics/gauge_info.go b/metrics/gauge_info.go
index c44b2d85f3..0010edc324 100644
--- a/metrics/gauge_info.go
+++ b/metrics/gauge_info.go
@@ -9,7 +9,7 @@ type GaugeInfoSnapshot interface {
Value() GaugeInfoValue
}
-// GaugeInfos hold a GaugeInfoValue value that can be set arbitrarily.
+// GaugeInfo holds a GaugeInfoValue value that can be set arbitrarily.
type GaugeInfo interface {
Update(GaugeInfoValue)
Snapshot() GaugeInfoSnapshot
diff --git a/metrics/healthcheck.go b/metrics/healthcheck.go
index f1ae31e34a..adcd15ab58 100644
--- a/metrics/healthcheck.go
+++ b/metrics/healthcheck.go
@@ -1,6 +1,6 @@
package metrics
-// Healthchecks hold an error value describing an arbitrary up/down status.
+// Healthcheck holds an error value describing an arbitrary up/down status.
type Healthcheck interface {
Check()
Error() error
diff --git a/metrics/histogram.go b/metrics/histogram.go
index 44de588bc1..10259a2463 100644
--- a/metrics/histogram.go
+++ b/metrics/histogram.go
@@ -4,7 +4,7 @@ type HistogramSnapshot interface {
SampleSnapshot
}
-// Histograms calculate distribution statistics from a series of int64 values.
+// Histogram calculates distribution statistics from a series of int64 values.
type Histogram interface {
Clear()
Update(int64)
diff --git a/metrics/influxdb/influxdbv2.go b/metrics/influxdb/influxdbv2.go
index 0be5137d5e..114d57ae07 100644
--- a/metrics/influxdb/influxdbv2.go
+++ b/metrics/influxdb/influxdbv2.go
@@ -25,7 +25,7 @@ type v2Reporter struct {
write api.WriteAPI
}
-// InfluxDBWithTags starts a InfluxDB reporter which will post the from the given metrics.Registry at each d interval with the specified tags
+// InfluxDBV2WithTags starts an InfluxDB reporter which will post the from the given metrics.Registry at each d interval with the specified tags
func InfluxDBV2WithTags(r metrics.Registry, d time.Duration, endpoint string, token string, bucket string, organization string, namespace string, tags map[string]string) {
rep := &v2Reporter{
reg: r,
diff --git a/metrics/metrics.go b/metrics/metrics.go
index 9ca8f115c0..fba6781b97 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -171,27 +171,29 @@ func CollectProcessMetrics(refresh time.Duration) {
// Define the various metrics to collect
var (
- cpuSysLoad = GetOrRegisterGauge("system/cpu/sysload", DefaultRegistry)
- cpuSysWait = GetOrRegisterGauge("system/cpu/syswait", DefaultRegistry)
- cpuProcLoad = GetOrRegisterGauge("system/cpu/procload", DefaultRegistry)
- cpuSysLoadTotal = GetOrRegisterCounterFloat64("system/cpu/sysload/total", DefaultRegistry)
- cpuSysWaitTotal = GetOrRegisterCounterFloat64("system/cpu/syswait/total", DefaultRegistry)
- cpuProcLoadTotal = GetOrRegisterCounterFloat64("system/cpu/procload/total", DefaultRegistry)
- cpuThreads = GetOrRegisterGauge("system/cpu/threads", DefaultRegistry)
- cpuGoroutines = GetOrRegisterGauge("system/cpu/goroutines", DefaultRegistry)
- cpuSchedLatency = getOrRegisterRuntimeHistogram("system/cpu/schedlatency", secondsToNs, nil)
- memPauses = getOrRegisterRuntimeHistogram("system/memory/pauses", secondsToNs, nil)
- memAllocs = GetOrRegisterMeter("system/memory/allocs", DefaultRegistry)
- memFrees = GetOrRegisterMeter("system/memory/frees", DefaultRegistry)
- memTotal = GetOrRegisterGauge("system/memory/held", DefaultRegistry)
- heapUsed = GetOrRegisterGauge("system/memory/used", DefaultRegistry)
- heapObjects = GetOrRegisterGauge("system/memory/objects", DefaultRegistry)
- diskReads = GetOrRegisterMeter("system/disk/readcount", DefaultRegistry)
- diskReadBytes = GetOrRegisterMeter("system/disk/readdata", DefaultRegistry)
- diskReadBytesCounter = GetOrRegisterCounter("system/disk/readbytes", DefaultRegistry)
- diskWrites = GetOrRegisterMeter("system/disk/writecount", DefaultRegistry)
- diskWriteBytes = GetOrRegisterMeter("system/disk/writedata", DefaultRegistry)
- diskWriteBytesCounter = GetOrRegisterCounter("system/disk/writebytes", DefaultRegistry)
+ cpuSysLoad = GetOrRegisterGauge("system/cpu/sysload", DefaultRegistry)
+ cpuSysWait = GetOrRegisterGauge("system/cpu/syswait", DefaultRegistry)
+ cpuProcLoad = GetOrRegisterGauge("system/cpu/procload", DefaultRegistry)
+ cpuSysLoadTotal = GetOrRegisterCounterFloat64("system/cpu/sysload/total", DefaultRegistry)
+ cpuSysWaitTotal = GetOrRegisterCounterFloat64("system/cpu/syswait/total", DefaultRegistry)
+ cpuProcLoadTotal = GetOrRegisterCounterFloat64("system/cpu/procload/total", DefaultRegistry)
+ cpuThreads = GetOrRegisterGauge("system/cpu/threads", DefaultRegistry)
+ cpuGoroutines = GetOrRegisterGauge("system/cpu/goroutines", DefaultRegistry)
+ cpuSchedLatency = getOrRegisterRuntimeHistogram("system/cpu/schedlatency", secondsToNs, nil)
+ memPauses = getOrRegisterRuntimeHistogram("system/memory/pauses", secondsToNs, nil)
+ memAllocs = GetOrRegisterMeter("system/memory/allocs", DefaultRegistry)
+ memFrees = GetOrRegisterMeter("system/memory/frees", DefaultRegistry)
+ memTotal = GetOrRegisterGauge("system/memory/held", DefaultRegistry)
+ heapUsed = GetOrRegisterGauge("system/memory/used", DefaultRegistry)
+ heapObjects = GetOrRegisterGauge("system/memory/objects", DefaultRegistry)
+ diskReads = GetOrRegisterMeter("system/disk/readcount", DefaultRegistry)
+ diskReadBytes = GetOrRegisterMeter("system/disk/readdata", DefaultRegistry)
+ diskReadBytesCounter = GetOrRegisterCounter("system/disk/readbytes", DefaultRegistry)
+ diskWrites = GetOrRegisterMeter("system/disk/writecount", DefaultRegistry)
+ diskWriteBytes = GetOrRegisterMeter("system/disk/writedata", DefaultRegistry)
+ diskWriteBytesCounter = GetOrRegisterCounter("system/disk/writebytes", DefaultRegistry)
+ diskIOReadBytesCounter = GetOrRegisterCounter("system/disk/io/readbytes", DefaultRegistry)
+ diskIOWriteBytesCounter = GetOrRegisterCounter("system/disk/io/writebytes", DefaultRegistry)
)
var lastCollectTime time.Time
@@ -243,6 +245,8 @@ func CollectProcessMetrics(refresh time.Duration) {
diskWriteBytes.Mark(diskstats[now].WriteBytes - diskstats[prev].WriteBytes)
diskReadBytesCounter.Inc(diskstats[now].ReadBytes - diskstats[prev].ReadBytes)
diskWriteBytesCounter.Inc(diskstats[now].WriteBytes - diskstats[prev].WriteBytes)
+ diskIOReadBytesCounter.Inc(diskstats[now].ReadIOBytes - diskstats[prev].ReadIOBytes)
+ diskIOWriteBytesCounter.Inc(diskstats[now].WriteIOBytes - diskstats[prev].WriteIOBytes)
}
time.Sleep(refresh)
diff --git a/miner/bid_simulator.go b/miner/bid_simulator.go
new file mode 100644
index 0000000000..51dcf58355
--- /dev/null
+++ b/miner/bid_simulator.go
@@ -0,0 +1,688 @@
+package miner
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "net/http"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/bidutil"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/miner/builderclient"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+const (
+ // maxBidPerBuilderPerBlock is the max bid number per builder
+ maxBidPerBuilderPerBlock = 3
+
+ commitInterruptBetterBid = 1
+
+ // leftOverTimeRate is the rate of left over time to simulate a bid
+ leftOverTimeRate = 11
+ // leftOverTimeScale is the scale of left over time to simulate a bid
+ leftOverTimeScale = 10
+)
+
+var (
+ diffInTurn = big.NewInt(2) // the difficulty of a block that proposed by an in-turn validator
+)
+
+var (
+ dialer = &net.Dialer{
+ Timeout: time.Second,
+ KeepAlive: 60 * time.Second,
+ }
+
+ transport = &http.Transport{
+ DialContext: dialer.DialContext,
+ MaxIdleConnsPerHost: 50,
+ MaxConnsPerHost: 50,
+ IdleConnTimeout: 90 * time.Second,
+ }
+
+ client = &http.Client{
+ Timeout: 5 * time.Second,
+ Transport: transport,
+ }
+)
+
+type WorkPreparer interface {
+ prepareWork(params *generateParams) (*environment, error)
+ etherbase() common.Address
+}
+
+// simBidReq is the request for simulating a bid
+type simBidReq struct {
+ bid *BidRuntime
+ interruptCh chan int32
+}
+
+// bidSimulator is in charge of receiving bid from builders, reporting issue to builders.
+// And take care of bid simulation, rewards computing, best bid maintaining.
+type bidSimulator struct {
+ config *MevConfig
+ delayLeftOver time.Duration
+ chain *core.BlockChain
+ chainConfig *params.ChainConfig
+ workPreparer WorkPreparer
+
+ running atomic.Bool // controlled by miner
+ exitCh chan struct{}
+
+ bidReceiving atomic.Bool // controlled by config and eth.AdminAPI
+
+ chainHeadCh chan core.ChainHeadEvent
+ chainHeadSub event.Subscription
+
+ sentryCli *builderclient.Client
+
+ // builder info (warning: only keep status in memory!)
+ buildersMu sync.RWMutex
+ builders map[common.Address]*builderclient.Client
+
+ // channels
+ simBidCh chan *simBidReq
+ newBidCh chan *types.Bid
+
+ pendingMu sync.RWMutex
+ pending map[uint64]map[common.Address]map[common.Hash]struct{} // blockNumber -> builder -> bidHash -> struct{}
+
+ bestBidMu sync.RWMutex
+ bestBid map[common.Hash]*BidRuntime // prevBlockHash -> bidRuntime
+
+ simBidMu sync.RWMutex
+ simulatingBid map[common.Hash]*BidRuntime // prevBlockHash -> bidRuntime, in the process of simulation
+}
+
+func newBidSimulator(
+ config *MevConfig,
+ delayLeftOver time.Duration,
+ chainConfig *params.ChainConfig,
+ chain *core.BlockChain,
+ workPreparer WorkPreparer,
+) *bidSimulator {
+ b := &bidSimulator{
+ config: config,
+ delayLeftOver: delayLeftOver,
+ chainConfig: chainConfig,
+ chain: chain,
+ workPreparer: workPreparer,
+ exitCh: make(chan struct{}),
+ chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
+ builders: make(map[common.Address]*builderclient.Client),
+ simBidCh: make(chan *simBidReq),
+ newBidCh: make(chan *types.Bid, 100),
+ pending: make(map[uint64]map[common.Address]map[common.Hash]struct{}),
+ bestBid: make(map[common.Hash]*BidRuntime),
+ simulatingBid: make(map[common.Hash]*BidRuntime),
+ }
+
+ b.chainHeadSub = chain.SubscribeChainHeadEvent(b.chainHeadCh)
+
+ if config.Enabled {
+ b.bidReceiving.Store(true)
+ b.dialSentryAndBuilders()
+
+ if len(b.builders) == 0 {
+ log.Warn("BidSimulator: no valid builders")
+ }
+ }
+
+ go b.clearLoop()
+ go b.mainLoop()
+ go b.newBidLoop()
+
+ return b
+}
+
+func (b *bidSimulator) dialSentryAndBuilders() {
+ var sentryCli *builderclient.Client
+ var err error
+
+ if b.config.SentryURL != "" {
+ sentryCli, err = builderclient.DialOptions(context.Background(), b.config.SentryURL, rpc.WithHTTPClient(client))
+ if err != nil {
+ log.Error("BidSimulator: failed to dial sentry", "url", b.config.SentryURL, "err", err)
+ }
+ }
+
+ b.sentryCli = sentryCli
+
+ for _, v := range b.config.Builders {
+ _ = b.AddBuilder(v.Address, v.URL)
+ }
+}
+
+func (b *bidSimulator) start() {
+ b.running.Store(true)
+}
+
+func (b *bidSimulator) stop() {
+ b.running.Store(false)
+}
+
+func (b *bidSimulator) close() {
+ b.running.Store(false)
+ close(b.exitCh)
+}
+
+func (b *bidSimulator) isRunning() bool {
+ return b.running.Load()
+}
+
+func (b *bidSimulator) receivingBid() bool {
+ return b.bidReceiving.Load()
+}
+
+func (b *bidSimulator) startReceivingBid() {
+ b.dialSentryAndBuilders()
+ b.bidReceiving.Store(true)
+}
+
+func (b *bidSimulator) stopReceivingBid() {
+ b.bidReceiving.Store(false)
+}
+
+func (b *bidSimulator) AddBuilder(builder common.Address, url string) error {
+ b.buildersMu.Lock()
+ defer b.buildersMu.Unlock()
+
+ if b.sentryCli != nil {
+ b.builders[builder] = b.sentryCli
+ } else {
+ var builderCli *builderclient.Client
+
+ if url != "" {
+ var err error
+
+ builderCli, err = builderclient.DialOptions(context.Background(), url, rpc.WithHTTPClient(client))
+ if err != nil {
+ log.Error("BidSimulator: failed to dial builder", "url", url, "err", err)
+ return err
+ }
+ }
+
+ b.builders[builder] = builderCli
+ }
+
+ return nil
+}
+
+func (b *bidSimulator) RemoveBuilder(builder common.Address) error {
+ b.buildersMu.Lock()
+ defer b.buildersMu.Unlock()
+
+ delete(b.builders, builder)
+
+ return nil
+}
+
+func (b *bidSimulator) ExistBuilder(builder common.Address) bool {
+ b.buildersMu.RLock()
+ defer b.buildersMu.RUnlock()
+
+ _, ok := b.builders[builder]
+
+ return ok
+}
+
// SetBestBid records bid as the best known bid building on prevBlockHash.
func (b *bidSimulator) SetBestBid(prevBlockHash common.Hash, bid *BidRuntime) {
	b.bestBidMu.Lock()
	defer b.bestBidMu.Unlock()

	b.bestBid[prevBlockHash] = bid
}

// GetBestBid returns the best known bid building on prevBlockHash, or nil if
// none has been recorded.
func (b *bidSimulator) GetBestBid(prevBlockHash common.Hash) *BidRuntime {
	b.bestBidMu.RLock()
	defer b.bestBidMu.RUnlock()

	return b.bestBid[prevBlockHash]
}

// SetSimulatingBid records the bid currently being simulated for
// prevBlockHash.
func (b *bidSimulator) SetSimulatingBid(prevBlockHash common.Hash, bid *BidRuntime) {
	b.simBidMu.Lock()
	defer b.simBidMu.Unlock()

	b.simulatingBid[prevBlockHash] = bid
}

// GetSimulatingBid returns the bid currently being simulated for
// prevBlockHash, or nil if no simulation is in flight.
func (b *bidSimulator) GetSimulatingBid(prevBlockHash common.Hash) *BidRuntime {
	b.simBidMu.RLock()
	defer b.simBidMu.RUnlock()

	return b.simulatingBid[prevBlockHash]
}

// RemoveSimulatingBid clears the in-flight simulation marker for
// prevBlockHash.
func (b *bidSimulator) RemoveSimulatingBid(prevBlockHash common.Hash) {
	b.simBidMu.Lock()
	defer b.simBidMu.Unlock()

	delete(b.simulatingBid, prevBlockHash)
}
+
// mainLoop drains simulation requests and executes them one at a time, so at
// most one bid simulation runs concurrently. It exits when the simulator is
// closed or the chain-head subscription errors out.
func (b *bidSimulator) mainLoop() {
	defer b.chainHeadSub.Unsubscribe()

	for {
		select {
		case req := <-b.simBidCh:
			// Drop queued requests while the simulator is stopped (e.g. sync).
			if !b.isRunning() {
				continue
			}

			b.simBid(req.interruptCh, req.bid)

		// System stopped
		case <-b.exitCh:
			return

		case <-b.chainHeadSub.Err():
			return
		}
	}
}
+
// newBidLoop receives fresh bids, filters out unprofitable or worse-than-known
// ones, and forwards promising bids to mainLoop for simulation, aborting any
// in-flight simulation that a better bid supersedes.
func (b *bidSimulator) newBidLoop() {
	var (
		// interruptCh belongs to the most recently committed simulation; it is
		// replaced on every commit, and the old channel receives the abort
		// reason before being closed.
		interruptCh chan int32
	)

	// commit aborts in-flight bid execution with given signal and resubmits a new one.
	commit := func(reason int32, bidRuntime *BidRuntime) {
		// if the left time is not enough to do simulation, return
		var simDuration time.Duration
		if lastBid := b.GetBestBid(bidRuntime.bid.ParentHash); lastBid != nil && lastBid.duration != 0 {
			simDuration = lastBid.duration
		}

		if time.Until(b.bidMustBefore(bidRuntime.bid.ParentHash)) <= simDuration*leftOverTimeRate/leftOverTimeScale {
			return
		}

		if interruptCh != nil {
			// each commit work will have its own interruptCh to stop work with a reason
			interruptCh <- reason
			close(interruptCh)
		}
		// Buffered so the abort signal above never blocks the sender.
		interruptCh = make(chan int32, 1)
		select {
		case b.simBidCh <- &simBidReq{interruptCh: interruptCh, bid: bidRuntime}:
		case <-b.exitCh:
			return
		}
	}

	for {
		select {
		case newBid := <-b.newBidCh:
			if !b.isRunning() {
				continue
			}

			// check the block reward and validator reward of the newBid.
			// ValidatorCommission is in basis points (100 == 1%), hence /10000.
			expectedBlockReward := newBid.GasFee
			expectedValidatorReward := new(big.Int).Mul(expectedBlockReward, big.NewInt(int64(b.config.ValidatorCommission)))
			expectedValidatorReward.Div(expectedValidatorReward, big.NewInt(10000))
			expectedValidatorReward.Sub(expectedValidatorReward, newBid.BuilderFee)

			if expectedValidatorReward.Cmp(big.NewInt(0)) < 0 {
				// damage self profit, ignore
				continue
			}

			bidRuntime := &BidRuntime{
				bid:                     newBid,
				expectedBlockReward:     expectedBlockReward,
				expectedValidatorReward: expectedValidatorReward,
				packedBlockReward:       big.NewInt(0),
				packedValidatorReward:   big.NewInt(0),
			}

			// TODO(renee-) opt bid comparison

			simulatingBid := b.GetSimulatingBid(newBid.ParentHash)
			// simulatingBid is nil means there is no bid in simulation
			if simulatingBid == nil {
				// bestBid is nil means bid is the first bid
				bestBid := b.GetBestBid(newBid.ParentHash)
				if bestBid == nil {
					commit(commitInterruptBetterBid, bidRuntime)
					continue
				}

				// if bestBid is not nil, check if newBid is better than bestBid
				if bidRuntime.expectedBlockReward.Cmp(bestBid.expectedBlockReward) > 0 &&
					bidRuntime.expectedValidatorReward.Cmp(bestBid.expectedValidatorReward) > 0 {
					// if both reward are better than last simulating newBid, commit for simulation
					commit(commitInterruptBetterBid, bidRuntime)
					continue
				}

				continue
			}

			// simulatingBid must be better than bestBid, if newBid is better than simulatingBid, commit for simulation
			if bidRuntime.expectedBlockReward.Cmp(simulatingBid.expectedBlockReward) > 0 &&
				bidRuntime.expectedValidatorReward.Cmp(simulatingBid.expectedValidatorReward) > 0 {
				// if both reward are better than last simulating newBid, commit for simulation
				commit(commitInterruptBetterBid, bidRuntime)
				continue
			}

		case <-b.exitCh:
			return
		}
	}
}
+
// bidMustBefore returns the absolute deadline by which a bid building on
// parentHash must finish simulation.
// NOTE(review): assumes b.chainConfig.Parlia is non-nil, i.e. the simulator
// only runs under the Parlia engine — confirm at construction site.
func (b *bidSimulator) bidMustBefore(parentHash common.Hash) time.Time {
	parentHeader := b.chain.GetHeaderByHash(parentHash)
	return bidutil.BidMustBefore(parentHeader, b.chainConfig.Parlia.Period, b.delayLeftOver)
}

// bidBetterBefore returns the deadline after which a newly arriving bid for
// parentHash is too late to be considered (earlier than bidMustBefore by the
// configured BidSimulationLeftOver).
func (b *bidSimulator) bidBetterBefore(parentHash common.Hash) time.Time {
	parentHeader := b.chain.GetHeaderByHash(parentHash)
	return bidutil.BidBetterBefore(parentHeader, b.chainConfig.Parlia.Period, b.delayLeftOver, b.config.BidSimulationLeftOver)
}
+
+func (b *bidSimulator) clearLoop() {
+ clearFn := func(parentHash common.Hash, blockNumber uint64) {
+ b.pendingMu.Lock()
+ delete(b.pending, blockNumber)
+ b.pendingMu.Unlock()
+
+ b.bestBidMu.Lock()
+ if bid, ok := b.bestBid[parentHash]; ok {
+ bid.env.discard()
+ }
+ delete(b.bestBid, parentHash)
+ for k, v := range b.bestBid {
+ if v.bid.BlockNumber <= blockNumber-core.TriesInMemory {
+ v.env.discard()
+ delete(b.bestBid, k)
+ }
+ }
+ b.bestBidMu.Unlock()
+
+ b.simBidMu.Lock()
+ if bid, ok := b.simulatingBid[parentHash]; ok {
+ bid.env.discard()
+ }
+ delete(b.simulatingBid, parentHash)
+ for k, v := range b.simulatingBid {
+ if v.bid.BlockNumber <= blockNumber-core.TriesInMemory {
+ v.env.discard()
+ delete(b.simulatingBid, k)
+ }
+ }
+ b.simBidMu.Unlock()
+ }
+
+ for head := range b.chainHeadCh {
+ if !b.isRunning() {
+ continue
+ }
+
+ clearFn(head.Block.ParentHash(), head.Block.NumberU64())
+ }
+}
+
// sendBid checks if the bid is already exists or if the builder sends too many bids,
// if yes, return error, if not, add bid into newBid chan waiting for judge profit.
// If newBidLoop cannot take the bid within one second, ErrMevBusy is returned
// so the builder can back off and retry.
func (b *bidSimulator) sendBid(_ context.Context, bid *types.Bid) error {
	timer := time.NewTimer(1 * time.Second)
	defer timer.Stop()
	select {
	case b.newBidCh <- bid:
		// Only count the bid against the builder's per-block quota once it is
		// actually accepted into the queue.
		b.AddPending(bid.BlockNumber, bid.Builder, bid.Hash())
		return nil
	case <-timer.C:
		return types.ErrMevBusy
	}
}
+
+func (b *bidSimulator) CheckPending(blockNumber uint64, builder common.Address, bidHash common.Hash) error {
+ b.pendingMu.Lock()
+ defer b.pendingMu.Unlock()
+
+ // check if bid exists or if builder sends too many bids
+ if _, ok := b.pending[blockNumber]; !ok {
+ b.pending[blockNumber] = make(map[common.Address]map[common.Hash]struct{})
+ }
+
+ if _, ok := b.pending[blockNumber][builder]; !ok {
+ b.pending[blockNumber][builder] = make(map[common.Hash]struct{})
+ }
+
+ if _, ok := b.pending[blockNumber][builder][bidHash]; ok {
+ return errors.New("bid already exists")
+ }
+
+ if len(b.pending[blockNumber][builder]) >= maxBidPerBuilderPerBlock {
+ return errors.New("too many bids")
+ }
+
+ return nil
+}
+
+func (b *bidSimulator) AddPending(blockNumber uint64, builder common.Address, bidHash common.Hash) {
+ b.pendingMu.Lock()
+ defer b.pendingMu.Unlock()
+
+ b.pending[blockNumber][builder][bidHash] = struct{}{}
+}
+
// simBid simulates a newBid with txs.
// simBid does not enable state prefetching when commit transaction.
// It replays every bid transaction on a freshly prepared environment, checks
// the packed rewards against the builder's claims, and promotes the bid to
// bestBid when it beats the current best. On any error the issue is reported
// back to the builder asynchronously.
func (b *bidSimulator) simBid(interruptCh chan int32, bidRuntime *BidRuntime) {
	// prevent from stopping happen in time interval from sendBid to simBid
	if !b.isRunning() || !b.receivingBid() {
		return
	}

	var (
		blockNumber = bidRuntime.bid.BlockNumber
		parentHash  = bidRuntime.bid.ParentHash
		builder     = bidRuntime.bid.Builder
		err         error
		success     bool
	)

	// ensure simulation exited then start next simulation
	b.SetSimulatingBid(parentHash, bidRuntime)

	// The deferred closure owns all cleanup: discarding the environment on
	// failure, reporting errors, recording the simulation duration, and
	// clearing the in-flight marker.
	defer func(simStart time.Time) {
		logCtx := []any{
			"blockNumber", blockNumber,
			"parentHash", parentHash,
			"builder", builder,
			"gasUsed", bidRuntime.bid.GasUsed,
		}

		if bidRuntime.env != nil {
			logCtx = append(logCtx, "gasLimit", bidRuntime.env.header.GasLimit)

			// Keep the environment alive only when the bid became bestBid;
			// clearLoop discards it later.
			if err != nil || !success {
				bidRuntime.env.discard()
			}
		}

		if err != nil {
			logCtx = append(logCtx, "err", err)
			log.Debug("bid simulation failed", logCtx...)

			go b.reportIssue(bidRuntime, err)
		}

		if success {
			// duration feeds newBidLoop's "enough time left to simulate" check.
			bidRuntime.duration = time.Since(simStart)
		}

		b.RemoveSimulatingBid(parentHash)
	}(time.Now())

	// prepareWork will configure header with a suitable time according to consensus
	// prepareWork will start trie prefetching
	if bidRuntime.env, err = b.workPreparer.prepareWork(&generateParams{
		parentHash: bidRuntime.bid.ParentHash,
		coinbase:   b.workPreparer.etherbase(),
	}); err != nil {
		return
	}

	// Reserve gas for the consensus system transactions appended at sealing.
	gasLimit := bidRuntime.env.header.GasLimit
	if bidRuntime.env.gasPool == nil {
		bidRuntime.env.gasPool = new(core.GasPool).AddGas(gasLimit)
		bidRuntime.env.gasPool.SubGas(params.SystemTxsGas)
	}

	if bidRuntime.bid.GasUsed > bidRuntime.env.gasPool.Gas() {
		err = errors.New("gas used exceeds gas limit")
		return
	}

	for _, tx := range bidRuntime.bid.Txs {
		// Non-blocking check for abort signals between transactions.
		select {
		case <-interruptCh:
			err = errors.New("simulation abort due to better bid arrived")
			return

		case <-b.exitCh:
			err = errors.New("miner exit")
			return

		default:
		}

		// Start executing the transaction
		bidRuntime.env.state.SetTxContext(tx.Hash(), bidRuntime.env.tcount)

		err = bidRuntime.commitTransaction(b.chain, b.chainConfig, tx)
		if err != nil {
			log.Error("BidSimulator: failed to commit tx", "bidHash", bidRuntime.bid.Hash(), "tx", tx.Hash(), "err", err)
			err = fmt.Errorf("invalid tx in bid, %v", err)
			return
		}

		bidRuntime.env.tcount++
	}

	bidRuntime.packReward(b.config.ValidatorCommission)

	// return if bid is invalid, reportIssue issue to mev-sentry/builder if simulation is fully done
	if !bidRuntime.validReward() {
		err = errors.New("reward does not achieve the expectation")
		return
	}

	bestBid := b.GetBestBid(parentHash)

	if bestBid == nil {
		b.SetBestBid(bidRuntime.bid.ParentHash, bidRuntime)
		success = true
		return
	}

	// this is the simplest strategy: best for all the delegators.
	if bidRuntime.packedBlockReward.Cmp(bestBid.packedBlockReward) > 0 {
		b.SetBestBid(bidRuntime.bid.ParentHash, bidRuntime)
		success = true
		return
	}
}
+
+// reportIssue reports the issue to the mev-sentry
+func (b *bidSimulator) reportIssue(bidRuntime *BidRuntime, err error) {
+ cli := b.builders[bidRuntime.bid.Builder]
+ if cli != nil {
+ cli.ReportIssue(context.Background(), &types.BidIssue{
+ Validator: bidRuntime.env.header.Coinbase,
+ Builder: bidRuntime.bid.Builder,
+ Message: err.Error(),
+ })
+ }
+}
+
// BidRuntime carries a bid through simulation: the claimed rewards derived on
// arrival, the rewards actually observed after replaying the bid's txs, and
// the execution environment built for the replay.
type BidRuntime struct {
	bid *types.Bid

	// env is the simulation environment; nil until prepareWork succeeds.
	env *environment

	// Rewards the bid claims, derived from bid.GasFee and the commission.
	expectedBlockReward     *big.Int
	expectedValidatorReward *big.Int

	// Rewards actually observed after simulation (see packReward).
	packedBlockReward     *big.Int
	packedValidatorReward *big.Int

	// duration is how long the successful simulation took; used to decide
	// whether there is enough time left to simulate a later bid.
	duration time.Duration
}
+
// validReward reports whether the simulated rewards meet or exceed what the
// bid claimed; a shortfall means the builder over-promised.
func (r *BidRuntime) validReward() bool {
	return r.packedBlockReward.Cmp(r.expectedBlockReward) >= 0 &&
		r.packedValidatorReward.Cmp(r.expectedValidatorReward) >= 0
}

// packReward calculates packedBlockReward and packedValidatorReward
// from the post-simulation SystemAddress balance. validatorCommission is in
// basis points (100 == 1%), hence the division by 10000; the builder fee is
// paid out of the validator's share.
func (r *BidRuntime) packReward(validatorCommission uint64) {
	r.packedBlockReward = r.env.state.GetBalance(consensus.SystemAddress).ToBig()
	r.packedValidatorReward = new(big.Int).Mul(r.packedBlockReward, big.NewInt(int64(validatorCommission)))
	r.packedValidatorReward.Div(r.packedValidatorReward, big.NewInt(10000))
	r.packedValidatorReward.Sub(r.packedValidatorReward, r.bid.BuilderFee)
}
+
+func (r *BidRuntime) commitTransaction(chain *core.BlockChain, chainConfig *params.ChainConfig, tx *types.Transaction) error {
+ var (
+ env = r.env
+ snap = env.state.Snapshot()
+ gp = env.gasPool.Gas()
+ sc *types.BlobTxSidecar
+ )
+
+ if tx.Type() == types.BlobTxType {
+ sc := tx.BlobTxSidecar()
+ if sc == nil {
+ return errors.New("blob transaction without blobs in miner")
+ }
+ // Checking against blob gas limit: It's kind of ugly to perform this check here, but there
+ // isn't really a better place right now. The blob gas limit is checked at block validation time
+ // and not during execution. This means core.ApplyTransaction will not return an error if the
+ // tx has too many blobs. So we have to explicitly check it here.
+ if (env.blobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock {
+ return errors.New("max data blobs reached")
+ }
+ }
+
+ receipt, err := core.ApplyTransaction(chainConfig, chain, &env.coinbase, env.gasPool, env.state, env.header, tx,
+ &env.header.GasUsed, *chain.GetVMConfig(), core.NewReceiptBloomGenerator())
+ if err != nil {
+ env.state.RevertToSnapshot(snap)
+ env.gasPool.SetGas(gp)
+ return err
+ }
+
+ if tx.Type() == types.BlobTxType {
+ env.txs = append(env.txs, tx.WithoutBlobTxSidecar())
+ env.receipts = append(env.receipts, receipt)
+ env.sidecars = append(env.sidecars, sc)
+ env.blobs += len(sc.Blobs)
+ *env.header.BlobGasUsed += receipt.BlobGasUsed
+ } else {
+ env.txs = append(env.txs, tx)
+ env.receipts = append(env.receipts, receipt)
+ }
+
+ return nil
+}
diff --git a/miner/builderclient/builderclient.go b/miner/builderclient/builderclient.go
new file mode 100644
index 0000000000..9606a92d95
--- /dev/null
+++ b/miner/builderclient/builderclient.go
@@ -0,0 +1,33 @@
+package builderclient
+
+import (
+ "context"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
// Client defines typed wrappers for the Ethereum RPC API.
// It is a thin layer over an rpc.Client exposing the mev_* namespace.
type Client struct {
	c *rpc.Client
}
+
+// DialOptions creates a new RPC client for the given URL. You can supply any of the
+// pre-defined client options to configure the underlying transport.
+func DialOptions(ctx context.Context, rawurl string, opts ...rpc.ClientOption) (*Client, error) {
+ c, err := rpc.DialOptions(ctx, rawurl, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return newClient(c), nil
+}
+
// newClient creates a client that uses the given RPC client.
func newClient(c *rpc.Client) *Client {
	return &Client{c}
}

// ReportIssue reports an issue with a bid to the remote mev_reportIssue
// endpoint; the call has no result payload.
func (ec *Client) ReportIssue(ctx context.Context, args *types.BidIssue) error {
	return ec.c.CallContext(ctx, nil, "mev_reportIssue", args)
}
diff --git a/miner/miner.go b/miner/miner.go
index 4db6140803..18a267ab83 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -56,6 +56,8 @@ type Config struct {
NewPayloadTimeout time.Duration // The maximum time allowance for creating a new payload
DisableVoteAttestation bool // Whether to skip assembling vote attestation
+
+ Mev MevConfig // Mev configuration
}
// DefaultConfig contains default settings for miner.
@@ -70,6 +72,8 @@ var DefaultConfig = Config{
Recommit: 3 * time.Second,
NewPayloadTimeout: 2 * time.Second,
DelayLeftOver: 50 * time.Millisecond,
+
+ Mev: DefaultMevConfig,
}
// Miner creates blocks and searches for proof-of-work values.
@@ -82,6 +86,8 @@ type Miner struct {
stopCh chan struct{}
worker *worker
+ bidSimulator *bidSimulator
+
wg sync.WaitGroup
}
@@ -95,6 +101,10 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even
stopCh: make(chan struct{}),
worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, false),
}
+
+ miner.bidSimulator = newBidSimulator(&config.Mev, config.DelayLeftOver, chainConfig, eth.BlockChain(), miner.worker)
+ miner.worker.setBestBidFetcher(miner.bidSimulator)
+
miner.wg.Add(1)
go miner.update()
return miner
@@ -129,6 +139,7 @@ func (miner *Miner) update() {
case downloader.StartEvent:
wasMining := miner.Mining()
miner.worker.stop()
+ miner.bidSimulator.stop()
canStart = false
if wasMining {
// Resume mining after sync was finished
@@ -141,6 +152,7 @@ func (miner *Miner) update() {
canStart = true
if shouldStart {
miner.worker.start()
+ miner.bidSimulator.start()
}
miner.worker.syncing.Store(false)
@@ -148,6 +160,7 @@ func (miner *Miner) update() {
canStart = true
if shouldStart {
miner.worker.start()
+ miner.bidSimulator.start()
}
miner.worker.syncing.Store(false)
@@ -157,13 +170,16 @@ func (miner *Miner) update() {
case <-miner.startCh:
if canStart {
miner.worker.start()
+ miner.bidSimulator.start()
}
shouldStart = true
case <-miner.stopCh:
shouldStart = false
miner.worker.stop()
+ miner.bidSimulator.stop()
case <-miner.exitCh:
miner.worker.close()
+ miner.bidSimulator.close()
return
}
}
@@ -186,6 +202,10 @@ func (miner *Miner) Mining() bool {
return miner.worker.isRunning()
}
+func (miner *Miner) InTurn() bool {
+ return miner.worker.inTurn()
+}
+
func (miner *Miner) Hashrate() uint64 {
if pow, ok := miner.engine.(consensus.PoW); ok {
return uint64(pow.Hashrate())
@@ -201,6 +221,11 @@ func (miner *Miner) SetExtra(extra []byte) error {
return nil
}
+func (miner *Miner) SetGasTip(tip *big.Int) error {
+ miner.worker.setGasTip(tip)
+ return nil
+}
+
// SetRecommitInterval sets the interval for sealing work resubmitting.
func (miner *Miner) SetRecommitInterval(interval time.Duration) {
miner.worker.setRecommitInterval(interval)
diff --git a/miner/miner_mev.go b/miner/miner_mev.go
new file mode 100644
index 0000000000..c499d0387d
--- /dev/null
+++ b/miner/miner_mev.go
@@ -0,0 +1,111 @@
+package miner
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
// BuilderConfig identifies a registered builder and the RPC endpoint used to
// reach it when no sentry is configured.
type BuilderConfig struct {
	Address common.Address
	URL     string
}

// MevConfig configures the MEV bid-simulation subsystem.
type MevConfig struct {
	Enabled               bool            // Whether to enable Mev or not
	SentryURL             string          // The url of Mev sentry
	Builders              []BuilderConfig // The list of builders
	ValidatorCommission   uint64          // 100 means 1%
	BidSimulationLeftOver time.Duration   // Time reserved before the sealing deadline for simulation
}

// DefaultMevConfig is the configuration used when none is supplied: MEV is
// disabled, with a 1% commission and 50ms of simulation leftover.
var DefaultMevConfig = MevConfig{
	Enabled:               false,
	SentryURL:             "",
	Builders:              nil,
	ValidatorCommission:   100,
	BidSimulationLeftOver: 50 * time.Millisecond,
}
+
// MevRunning return true if mev is running: the simulator is started and bids
// are being accepted.
func (miner *Miner) MevRunning() bool {
	return miner.bidSimulator.isRunning() && miner.bidSimulator.receivingBid()
}

// StartMev starts mev by opening the gate for incoming bids.
func (miner *Miner) StartMev() {
	miner.bidSimulator.startReceivingBid()
}

// StopMev stops mev; incoming bids are rejected until StartMev.
func (miner *Miner) StopMev() {
	miner.bidSimulator.stopReceivingBid()
}

// AddBuilder adds a builder to the bid simulator.
func (miner *Miner) AddBuilder(builder common.Address, url string) error {
	return miner.bidSimulator.AddBuilder(builder, url)
}

// RemoveBuilder removes a builder from the bid simulator.
func (miner *Miner) RemoveBuilder(builderAddr common.Address) error {
	return miner.bidSimulator.RemoveBuilder(builderAddr)
}
+
+func (miner *Miner) SendBid(ctx context.Context, bidArgs *types.BidArgs) (common.Hash, error) {
+ builder, err := bidArgs.EcrecoverSender()
+ if err != nil {
+ return common.Hash{}, types.NewInvalidBidError(fmt.Sprintf("invalid signature:%v", err))
+ }
+
+ if !miner.bidSimulator.ExistBuilder(builder) {
+ return common.Hash{}, types.NewInvalidBidError("builder is not registered")
+ }
+
+ err = miner.bidSimulator.CheckPending(bidArgs.RawBid.BlockNumber, builder, bidArgs.RawBid.Hash())
+ if err != nil {
+ return common.Hash{}, err
+ }
+
+ signer := types.MakeSigner(miner.worker.chainConfig, big.NewInt(int64(bidArgs.RawBid.BlockNumber)), uint64(time.Now().Unix()))
+ bid, err := bidArgs.ToBid(builder, signer)
+ if err != nil {
+ return common.Hash{}, types.NewInvalidBidError(fmt.Sprintf("fail to convert bidArgs to bid, %v", err))
+ }
+
+ bidBetterBefore := miner.bidSimulator.bidBetterBefore(bidArgs.RawBid.ParentHash)
+ timeout := time.Until(bidBetterBefore)
+
+ if timeout <= 0 {
+ return common.Hash{}, fmt.Errorf("too late, expected befor %s, appeared %s later", bidBetterBefore,
+ common.PrettyDuration(timeout))
+ }
+
+ err = miner.bidSimulator.sendBid(ctx, bid)
+
+ if err != nil {
+ return common.Hash{}, err
+ }
+
+ return bid.Hash(), nil
+}
+
+func (miner *Miner) BestPackedBlockReward(parentHash common.Hash) *big.Int {
+ bidRuntime := miner.bidSimulator.GetBestBid(parentHash)
+ if bidRuntime == nil {
+ return big.NewInt(0)
+ }
+
+ return bidRuntime.packedBlockReward
+}
+
// MevParams exposes the operator-facing MEV parameters (commission in basis
// points and the simulation time reserve) to the RPC layer.
func (miner *Miner) MevParams() *types.MevParams {
	return &types.MevParams{
		ValidatorCommission:   miner.worker.config.Mev.ValidatorCommission,
		BidSimulationLeftOver: miner.worker.config.Mev.BidSimulationLeftOver,
	}
}
diff --git a/miner/miner_test.go b/miner/miner_test.go
index 411d6026ce..5907fb4464 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
)
type mockBackend struct {
@@ -279,7 +280,7 @@ func minerTestGenesisBlock(period uint64, gasLimit uint64, faucet common.Address
GasLimit: gasLimit,
BaseFee: big.NewInt(params.InitialBaseFee),
Difficulty: big.NewInt(1),
- Alloc: map[common.Address]core.GenesisAccount{
+ Alloc: map[common.Address]types.Account{
common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover
common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256
common.BytesToAddress([]byte{3}): {Balance: big.NewInt(1)}, // RIPEMD
@@ -300,7 +301,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
}
// Create chainConfig
chainDB := rawdb.NewMemoryDatabase()
- triedb := trie.NewDatabase(chainDB, nil)
+ triedb := triedb.NewDatabase(chainDB, nil)
genesis := minerTestGenesisBlock(15, 11_500_000, common.HexToAddress("12345"))
chainConfig, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis)
if err != nil {
@@ -317,7 +318,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) {
blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)}
pool := legacypool.New(testTxPoolConfig, blockchain)
- txpool, _ := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain, []txpool.SubPool{pool})
+ txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{pool})
backend := NewMockBackend(bc, txpool)
// Create event Mux
diff --git a/miner/ordering.go b/miner/ordering.go
index fa47e93172..7cbe2d5630 100644
--- a/miner/ordering.go
+++ b/miner/ordering.go
@@ -21,28 +21,31 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/holiman/uint256"
)
// txWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap
type txWithMinerFee struct {
tx *txpool.LazyTransaction
from common.Address
- fees *big.Int
+ fees *uint256.Int
}
// newTxWithMinerFee creates a wrapped transaction, calculating the effective
// miner gasTipCap if a base fee is provided.
// Returns error in case of a negative effective miner gasTipCap.
-func newTxWithMinerFee(tx *txpool.LazyTransaction, from common.Address, baseFee *big.Int) (*txWithMinerFee, error) {
- tip := new(big.Int).Set(tx.GasTipCap)
+func newTxWithMinerFee(tx *txpool.LazyTransaction, from common.Address, baseFee *uint256.Int) (*txWithMinerFee, error) {
+ tip := new(uint256.Int).Set(tx.GasTipCap)
if baseFee != nil {
if tx.GasFeeCap.Cmp(baseFee) < 0 {
return nil, types.ErrGasFeeCapTooLow
}
- tip = math.BigMin(tx.GasTipCap, new(big.Int).Sub(tx.GasFeeCap, baseFee))
+ tip = new(uint256.Int).Sub(tx.GasFeeCap, baseFee)
+ if tip.Gt(tx.GasTipCap) {
+ tip = tx.GasTipCap
+ }
}
return &txWithMinerFee{
tx: tx,
@@ -87,7 +90,7 @@ type transactionsByPriceAndNonce struct {
txs map[common.Address][]*txpool.LazyTransaction // Per account nonce-sorted list of transactions
heads txByPriceAndTime // Next transaction for each unique account (price heap)
signer types.Signer // Signer for the set of transactions
- baseFee *big.Int // Current base fee
+ baseFee *uint256.Int // Current base fee
}
// newTransactionsByPriceAndNonce creates a transaction set that can retrieve
@@ -96,10 +99,15 @@ type transactionsByPriceAndNonce struct {
// Note, the input map is reowned so the caller should not interact any more with
// if after providing it to the constructor.
func newTransactionsByPriceAndNonce(signer types.Signer, txs map[common.Address][]*txpool.LazyTransaction, baseFee *big.Int) *transactionsByPriceAndNonce {
+ // Convert the basefee from header format to uint256 format
+ var baseFeeUint *uint256.Int
+ if baseFee != nil {
+ baseFeeUint = uint256.MustFromBig(baseFee)
+ }
// Initialize a price and received time based heap with the head transactions
heads := make(txByPriceAndTime, 0, len(txs))
for from, accTxs := range txs {
- wrapped, err := newTxWithMinerFee(accTxs[0], from, baseFee)
+ wrapped, err := newTxWithMinerFee(accTxs[0], from, baseFeeUint)
if err != nil {
delete(txs, from)
continue
@@ -114,7 +122,7 @@ func newTransactionsByPriceAndNonce(signer types.Signer, txs map[common.Address]
txs: txs,
heads: heads,
signer: signer,
- baseFee: baseFee,
+ baseFee: baseFeeUint,
}
}
@@ -126,24 +134,24 @@ func (t *transactionsByPriceAndNonce) Copy() *transactionsByPriceAndNonce {
for acc, txsTmp := range t.txs {
txs[acc] = txsTmp
}
- var baseFee *big.Int
+ var baseFee uint256.Int
if t.baseFee != nil {
- baseFee = big.NewInt(0).Set(t.baseFee)
+ baseFee = *t.baseFee
}
return &transactionsByPriceAndNonce{
heads: heads,
txs: txs,
signer: t.signer,
- baseFee: baseFee,
+ baseFee: &baseFee,
}
}
// Peek returns the next transaction by price.
-func (t *transactionsByPriceAndNonce) Peek() *txpool.LazyTransaction {
+func (t *transactionsByPriceAndNonce) Peek() (*txpool.LazyTransaction, *uint256.Int) {
if len(t.heads) == 0 {
- return nil
+ return nil, nil
}
- return t.heads[0].tx
+ return t.heads[0].tx, t.heads[0].fees
}
// Peek returns the next transaction by price.
@@ -174,6 +182,17 @@ func (t *transactionsByPriceAndNonce) Pop() {
heap.Pop(&t.heads)
}
+// Empty returns if the price heap is empty. It can be used to check it simpler
+// than calling peek and checking for nil return.
+func (t *transactionsByPriceAndNonce) Empty() bool {
+ return len(t.heads) == 0
+}
+
+// Clear removes the entire content of the heap.
+func (t *transactionsByPriceAndNonce) Clear() {
+ t.heads, t.txs = nil, nil
+}
+
func (t *transactionsByPriceAndNonce) CurrentSize() int {
return len(t.heads)
}
diff --git a/miner/ordering_test.go b/miner/ordering_test.go
index e5868d7a06..3587a835c8 100644
--- a/miner/ordering_test.go
+++ b/miner/ordering_test.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/holiman/uint256"
)
func TestTransactionPriceNonceSortLegacy(t *testing.T) {
@@ -92,8 +93,8 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
Hash: tx.Hash(),
Tx: tx,
Time: tx.Time(),
- GasFeeCap: tx.GasFeeCap(),
- GasTipCap: tx.GasTipCap(),
+ GasFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
+ GasTipCap: uint256.MustFromBig(tx.GasTipCap()),
Gas: tx.Gas(),
BlobGas: tx.BlobGas(),
})
@@ -104,7 +105,7 @@ func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) {
txset := newTransactionsByPriceAndNonce(signer, groups, baseFee)
txs := types.Transactions{}
- for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
+ for tx, _ := txset.Peek(); tx != nil; tx, _ = txset.Peek() {
txs = append(txs, tx.Tx)
txset.Shift()
}
@@ -160,8 +161,8 @@ func TestTransactionTimeSort(t *testing.T) {
Hash: tx.Hash(),
Tx: tx,
Time: tx.Time(),
- GasFeeCap: tx.GasFeeCap(),
- GasTipCap: tx.GasTipCap(),
+ GasFeeCap: uint256.MustFromBig(tx.GasFeeCap()),
+ GasTipCap: uint256.MustFromBig(tx.GasTipCap()),
Gas: tx.Gas(),
BlobGas: tx.BlobGas(),
})
@@ -170,7 +171,7 @@ func TestTransactionTimeSort(t *testing.T) {
txset := newTransactionsByPriceAndNonce(signer, groups, nil)
txs := types.Transactions{}
- for tx := txset.Peek(); tx != nil; tx = txset.Peek() {
+ for tx, _ := txset.Peek(); tx != nil; tx, _ = txset.Peek() {
txs = append(txs, tx.Tx)
txset.Shift()
}
diff --git a/miner/stress/clique/main.go b/miner/stress/clique/main.go
index ead3d8df35..2bebe881b1 100644
--- a/miner/stress/clique/main.go
+++ b/miner/stress/clique/main.go
@@ -154,9 +154,9 @@ func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core
genesis.Config.ChainID = big.NewInt(18)
genesis.Config.Clique.Period = 1
- genesis.Alloc = core.GenesisAlloc{}
+ genesis.Alloc = types.GenesisAlloc{}
for _, faucet := range faucets {
- genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = core.GenesisAccount{
+ genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = types.Account{
Balance: new(big.Int).Exp(big.NewInt(2), big.NewInt(128), nil),
}
}
@@ -183,7 +183,6 @@ func makeGenesis(faucets []*ecdsa.PrivateKey, sealers []*ecdsa.PrivateKey) *core
func makeSealer(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) {
// Define the basic configurations for the Ethereum node
datadir, _ := os.MkdirTemp("", "")
-
config := &node.Config{
Name: "geth",
Version: params.Version,
diff --git a/miner/worker.go b/miner/worker.go
index c8232c2945..ba5afdf41f 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -24,6 +24,9 @@ import (
"sync/atomic"
"time"
+ lru "github.com/hashicorp/golang-lru"
+ "github.com/holiman/uint256"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
@@ -40,8 +43,6 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
- lru "github.com/hashicorp/golang-lru"
- "github.com/holiman/uint256"
)
const (
@@ -162,9 +163,14 @@ type getWorkReq struct {
result chan *newPayloadResult // non-blocking channel
}
+type bidFetcher interface {
+ GetBestBid(parentHash common.Hash) *BidRuntime
+}
+
// worker is the main object which takes care of submitting new work to consensus engine
// and gathering the sealing result.
type worker struct {
+ bidFetcher bidFetcher
prefetcher core.Prefetcher
config *Config
chainConfig *params.ChainConfig
@@ -196,6 +202,7 @@ type worker struct {
mu sync.RWMutex // The lock used to protect the coinbase and extra fields
coinbase common.Address
extra []byte
+ tip *uint256.Int // Minimum tip needed for non-local transaction to include them
pendingMu sync.RWMutex
pendingTasks map[common.Hash]*task
@@ -244,6 +251,7 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
isLocalBlock: isLocalBlock,
coinbase: config.Etherbase,
extra: config.ExtraData,
+ tip: uint256.MustFromBig(config.GasPrice),
pendingTasks: make(map[common.Hash]*task),
chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
newWorkCh: make(chan *newWorkReq),
@@ -287,9 +295,14 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus
if init {
worker.startCh <- struct{}{}
}
+
return worker
}
+func (w *worker) setBestBidFetcher(fetcher bidFetcher) {
+ w.bidFetcher = fetcher
+}
+
// setEtherbase sets the etherbase used to initialize the block coinbase field.
func (w *worker) setEtherbase(addr common.Address) {
w.mu.Lock()
@@ -317,6 +330,13 @@ func (w *worker) setExtra(extra []byte) {
w.extra = extra
}
+// setGasTip sets the minimum miner tip needed to include a non-local transaction.
+func (w *worker) setGasTip(tip *big.Int) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ w.tip = uint256.MustFromBig(tip)
+}
+
// setRecommitInterval updates the interval for miner sealing work recommitting.
func (w *worker) setRecommitInterval(interval time.Duration) {
select {
@@ -747,7 +767,7 @@ func (w *worker) applyTransaction(env *environment, tx *types.Transaction, recei
return receipt, err
}
-func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAndNonce,
+func (w *worker) commitTransactions(env *environment, plainTxs, blobTxs *transactionsByPriceAndNonce,
interruptCh chan int32, stopTimer *time.Timer) error {
gasLimit := env.header.GasLimit
if env.gasPool == nil {
@@ -758,15 +778,15 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
var coalescedLogs []*types.Log
// initialize bloom processors
processorCapacity := 100
- if txs.CurrentSize() < processorCapacity {
- processorCapacity = txs.CurrentSize()
+ if plainTxs.CurrentSize() < processorCapacity {
+ processorCapacity = plainTxs.CurrentSize()
}
bloomProcessors := core.NewAsyncReceiptBloomGenerator(processorCapacity)
stopPrefetchCh := make(chan struct{})
defer close(stopPrefetchCh)
- // prefetch txs from all pending txs
- txsPrefetch := txs.Copy()
+ // prefetch plainTxs txs, don't bother to prefetch a few blobTxs
+ txsPrefetch := plainTxs.Copy()
tx := txsPrefetch.PeekWithUnwrap()
if tx != nil {
txCurr := &tx
@@ -809,11 +829,38 @@ LOOP:
default:
}
}
- // Retrieve the next transaction and abort if all done
- ltx := txs.Peek()
+
+ // If we don't have enough blob space for any further blob transactions,
+ // skip that list altogether
+ if !blobTxs.Empty() && env.blobs*params.BlobTxBlobGasPerBlob >= params.MaxBlobGasPerBlock {
+ log.Trace("Not enough blob space for further blob transactions")
+ blobTxs.Clear()
+ // Fall though to pick up any plain txs
+ }
+ // Retrieve the next transaction and abort if all done.
+ var (
+ ltx *txpool.LazyTransaction
+ txs *transactionsByPriceAndNonce
+ )
+ pltx, ptip := plainTxs.Peek()
+ bltx, btip := blobTxs.Peek()
+
+ switch {
+ case pltx == nil:
+ txs, ltx = blobTxs, bltx
+ case bltx == nil:
+ txs, ltx = plainTxs, pltx
+ default:
+ if ptip.Lt(btip) {
+ txs, ltx = blobTxs, bltx
+ } else {
+ txs, ltx = plainTxs, pltx
+ }
+ }
if ltx == nil {
break
}
+
// If we don't have enough space for the next transaction, skip the account.
if env.gasPool.Gas() < ltx.Gas {
log.Trace("Not enough gas left for transaction", "hash", ltx.Hash, "left", env.gasPool.Gas(), "needed", ltx.Gas)
@@ -910,7 +957,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
if genParams.parentHash != (common.Hash{}) {
block := w.chain.GetBlockByHash(genParams.parentHash)
if block == nil {
- return nil, fmt.Errorf("missing parent")
+ return nil, errors.New("missing parent")
}
parent = block.Header()
}
@@ -958,7 +1005,9 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
}
header.BlobGasUsed = new(uint64)
header.ExcessBlobGas = &excessBlobGas
- header.ParentBeaconRoot = genParams.beaconRoot
+ if w.chainConfig.Parlia == nil {
+ header.ParentBeaconRoot = genParams.beaconRoot
+ }
}
// Run the consensus preparation with the default or customized consensus engine.
if err := w.engine.Prepare(w.chain, header); err != nil {
@@ -991,39 +1040,65 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
// into the given sealing block. The transaction selection and ordering strategy can
// be customized with the plugin in the future.
func (w *worker) fillTransactions(interruptCh chan int32, env *environment, stopTimer *time.Timer) (err error) {
- // Split the pending transactions into locals and remotes
- // Fill the block with all available pending transactions.
- pending := w.eth.TxPool().Pending(false)
+ w.mu.RLock()
+ tip := w.tip
+ w.mu.RUnlock()
+
+ // Retrieve the pending transactions pre-filtered by the 1559/4844 dynamic fees
+ filter := txpool.PendingFilter{
+ MinTip: tip,
+ }
+ if env.header.BaseFee != nil {
+ filter.BaseFee = uint256.MustFromBig(env.header.BaseFee)
+ }
+ if env.header.ExcessBlobGas != nil {
+ filter.BlobFee = uint256.MustFromBig(eip4844.CalcBlobFee(*env.header.ExcessBlobGas))
+ }
+ filter.OnlyPlainTxs, filter.OnlyBlobTxs = true, false
+ pendingPlainTxs := w.eth.TxPool().Pending(filter)
+
+ filter.OnlyPlainTxs, filter.OnlyBlobTxs = false, true
+ pendingBlobTxs := w.eth.TxPool().Pending(filter)
// Split the pending transactions into locals and remotes.
- localTxs, remoteTxs := make(map[common.Address][]*txpool.LazyTransaction), pending
+ localPlainTxs, remotePlainTxs := make(map[common.Address][]*txpool.LazyTransaction), pendingPlainTxs
+ localBlobTxs, remoteBlobTxs := make(map[common.Address][]*txpool.LazyTransaction), pendingBlobTxs
+
for _, account := range w.eth.TxPool().Locals() {
- if txs := remoteTxs[account]; len(txs) > 0 {
- delete(remoteTxs, account)
- localTxs[account] = txs
+ if txs := remotePlainTxs[account]; len(txs) > 0 {
+ delete(remotePlainTxs, account)
+ localPlainTxs[account] = txs
+ }
+ if txs := remoteBlobTxs[account]; len(txs) > 0 {
+ delete(remoteBlobTxs, account)
+ localBlobTxs[account] = txs
}
}
-
// Fill the block with all available pending transactions.
- if len(localTxs) > 0 {
- txs := newTransactionsByPriceAndNonce(env.signer, localTxs, env.header.BaseFee)
- err = w.commitTransactions(env, txs, interruptCh, stopTimer)
- // we will abort here when:
- // 1.new block was imported
- // 2.out of Gas, no more transaction can be added.
- // 3.the mining timer has expired, stop adding transactions.
- // 4.interrupted resubmit timer, which is by default 10s.
- // resubmit is for PoW only, can be deleted for PoS consensus later
- if err != nil {
- return
+ // we will abort when:
+ // 1.new block was imported
+ // 2.out of Gas, no more transaction can be added.
+ // 3.the mining timer has expired, stop adding transactions.
+ // 4.interrupted resubmit timer, which is by default 10s.
+ // resubmit is for PoW only, can be deleted for PoS consensus later
+ if len(localPlainTxs) > 0 || len(localBlobTxs) > 0 {
+ plainTxs := newTransactionsByPriceAndNonce(env.signer, localPlainTxs, env.header.BaseFee)
+ blobTxs := newTransactionsByPriceAndNonce(env.signer, localBlobTxs, env.header.BaseFee)
+
+ if err := w.commitTransactions(env, plainTxs, blobTxs, interruptCh, stopTimer); err != nil {
+ return err
}
}
- if len(remoteTxs) > 0 {
- txs := newTransactionsByPriceAndNonce(env.signer, remoteTxs, env.header.BaseFee)
- err = w.commitTransactions(env, txs, interruptCh, stopTimer)
+ if len(remotePlainTxs) > 0 || len(remoteBlobTxs) > 0 {
+ plainTxs := newTransactionsByPriceAndNonce(env.signer, remotePlainTxs, env.header.BaseFee)
+ blobTxs := newTransactionsByPriceAndNonce(env.signer, remoteBlobTxs, env.header.BaseFee)
+
+ if err := w.commitTransactions(env, plainTxs, blobTxs, interruptCh, stopTimer); err != nil {
+ return err
+ }
}
- return
+ return nil
}
// generateWork generates a sealing block based on the given parameters.
@@ -1218,6 +1293,24 @@ LOOP:
bestReward = balance
}
}
+
+ // when out-turn, use bestWork to prevent bundle leakage.
+ // when in-turn, compare with remote work.
+ if w.bidFetcher != nil && bestWork.header.Difficulty.Cmp(diffInTurn) == 0 {
+ bestBid := w.bidFetcher.GetBestBid(bestWork.header.ParentHash)
+
+ if bestBid != nil && bestReward.CmpBig(bestBid.packedBlockReward) < 0 {
+ // localValidatorReward is the reward for the validator self by the local block.
+ localValidatorReward := new(uint256.Int).Mul(bestReward, uint256.NewInt(w.config.Mev.ValidatorCommission))
+ localValidatorReward.Div(localValidatorReward, uint256.NewInt(10000))
+
+ // blockReward(benefits delegators) and validatorReward(benefits the validator) are both optimal
+ if localValidatorReward.CmpBig(bestBid.packedValidatorReward) < 0 {
+ bestWork = bestBid.env
+ }
+ }
+ }
+
w.commit(bestWork, w.fullTaskHook, true, start)
// Swap out the old work with the new one, terminating any leftover
@@ -1228,6 +1321,12 @@ LOOP:
w.current = bestWork
}
+// inTurn return true if the current worker is in turn.
+func (w *worker) inTurn() bool {
+ validator, _ := w.engine.NextInTurnValidator(w.chain, w.chain.CurrentBlock())
+ return validator != common.Address{} && validator == w.etherbase()
+}
+
// commit runs any post-transaction state modifications, assembles the final block
// and commits new work if consensus engine is running.
// Note the assumption is held that the mutation is allowed to the passed env, do
diff --git a/miner/worker_test.go b/miner/worker_test.go
index 1bd22a646c..268f3f69a5 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -116,7 +116,7 @@ type testWorkerBackend struct {
func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, n int) *testWorkerBackend {
var gspec = &core.Genesis{
Config: chainConfig,
- Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
+ Alloc: types.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
}
switch e := engine.(type) {
case *clique.Clique:
@@ -134,7 +134,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
t.Fatalf("core.NewBlockChain failed: %v", err)
}
pool := legacypool.New(testTxPoolConfig, chain)
- txpool, _ := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), chain, []txpool.SubPool{pool})
+ txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, chain, []txpool.SubPool{pool})
return &testWorkerBackend{
db: db,
diff --git a/node/defaults.go b/node/defaults.go
index 8b03e24c28..29cb68947a 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -41,6 +41,7 @@ const (
// needs of all CLs.
engineAPIBatchItemLimit = 2000
engineAPIBatchResponseSizeLimit = 250 * 1000 * 1000
+ engineAPIBodyLimit = 128 * 1024 * 1024
)
var (
diff --git a/node/node.go b/node/node.go
index 2c8e6f0f88..bd102d32fb 100644
--- a/node/node.go
+++ b/node/node.go
@@ -493,14 +493,16 @@ func (n *Node) startRPC() error {
jwtSecret: secret,
batchItemLimit: engineAPIBatchItemLimit,
batchResponseSizeLimit: engineAPIBatchResponseSizeLimit,
+ httpBodyLimit: engineAPIBodyLimit,
}
- if err := server.enableRPC(allAPIs, httpConfig{
+ err := server.enableRPC(allAPIs, httpConfig{
CorsAllowedOrigins: DefaultAuthCors,
Vhosts: n.config.AuthVirtualHosts,
Modules: DefaultAuthModules,
prefix: DefaultAuthPrefix,
rpcEndpointConfig: sharedConfig,
- }); err != nil {
+ })
+ if err != nil {
return err
}
servers = append(servers, server)
@@ -784,10 +786,30 @@ func (n *Node) OpenAndMergeDatabase(name string, cache, handles int, freezer, di
if persistDiff {
chainDataHandles = handles * chainDataHandlesPercentage / 100
}
+ var statediskdb ethdb.Database
+ var err error
+ // Open the separated state database if the state directory exists
+ if n.IsSeparatedDB() {
+ // Allocate half of the handles and cache to this separate state data database
+ statediskdb, err = n.OpenDatabaseWithFreezer(name+"/state", cache/2, chainDataHandles/2, "", "eth/db/statedata/", readonly, false, false, pruneAncientData)
+ if err != nil {
+ return nil, err
+ }
+
+ // Reduce the handles and cache to this separate database because it is not a complete database with no trie data storing in it.
+ cache = int(float64(cache) * 0.6)
+ chainDataHandles = int(float64(chainDataHandles) * 0.6)
+ }
+
chainDB, err := n.OpenDatabaseWithFreezer(name, cache, chainDataHandles, freezer, namespace, readonly, false, false, pruneAncientData)
if err != nil {
return nil, err
}
+
+ if statediskdb != nil {
+ chainDB.SetStateStore(statediskdb)
+ }
+
if persistDiff {
diffStore, err := n.OpenDiffDatabase(name, handles-chainDataHandles, diff, namespace, readonly)
if err != nil {
@@ -835,6 +857,16 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient,
return db, err
}
+// IsSeparatedDB check the state subdirectory of db, if subdirectory exists, return true
+func (n *Node) IsSeparatedDB() bool {
+ separateDir := filepath.Join(n.ResolvePath("chaindata"), "state")
+ fileInfo, err := os.Stat(separateDir)
+ if os.IsNotExist(err) {
+ return false
+ }
+ return fileInfo.IsDir()
+}
+
func (n *Node) OpenDiffDatabase(name string, handles int, diff, namespace string, readonly bool) (*leveldb.Database, error) {
n.lock.Lock()
defer n.lock.Unlock()
diff --git a/node/rpcstack.go b/node/rpcstack.go
index b33c238051..1790e051a0 100644
--- a/node/rpcstack.go
+++ b/node/rpcstack.go
@@ -19,6 +19,7 @@ package node
import (
"compress/gzip"
"context"
+ "errors"
"fmt"
"io"
"net"
@@ -56,6 +57,7 @@ type rpcEndpointConfig struct {
jwtSecret []byte // optional JWT secret
batchItemLimit int
batchResponseSizeLimit int
+ httpBodyLimit int
}
type rpcHandler struct {
@@ -298,12 +300,15 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error {
defer h.mu.Unlock()
if h.rpcAllowed() {
- return fmt.Errorf("JSON-RPC over HTTP is already enabled")
+ return errors.New("JSON-RPC over HTTP is already enabled")
}
// Create RPC server and handler.
srv := rpc.NewServer()
srv.SetBatchLimits(config.batchItemLimit, config.batchResponseSizeLimit)
+ if config.httpBodyLimit > 0 {
+ srv.SetHTTPBodyLimit(config.httpBodyLimit)
+ }
if err := RegisterApis(apis, config.Modules, srv); err != nil {
return err
}
@@ -336,6 +341,9 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error {
// Create RPC server and handler.
srv := rpc.NewServer()
srv.SetBatchLimits(config.batchItemLimit, config.batchResponseSizeLimit)
+ if config.httpBodyLimit > 0 {
+ srv.SetHTTPBodyLimit(config.httpBodyLimit)
+ }
if err := RegisterApis(apis, config.Modules, srv); err != nil {
return err
}
diff --git a/p2p/discover/metrics.go b/p2p/discover/metrics.go
index da8e9cb817..3cd0ab0414 100644
--- a/p2p/discover/metrics.go
+++ b/p2p/discover/metrics.go
@@ -44,7 +44,7 @@ func init() {
}
}
-// meteredConn is a wrapper around a net.UDPConn that meters both the
+// meteredUdpConn is a wrapper around a net.UDPConn that meters both the
// inbound and outbound network traffic.
type meteredUdpConn struct {
UDPConn
@@ -58,7 +58,7 @@ func newMeteredConn(conn UDPConn) UDPConn {
return &meteredUdpConn{UDPConn: conn}
}
-// Read delegates a network read to the underlying connection, bumping the udp ingress traffic meter along the way.
+// ReadFromUDP delegates a network read to the underlying connection, bumping the udp ingress traffic meter along the way.
func (c *meteredUdpConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) {
n, addr, err = c.UDPConn.ReadFromUDP(b)
ingressTrafficMeter.Mark(int64(n))
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index 9c1570d51d..dcd8d48ca8 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -26,6 +26,7 @@ import (
"context"
crand "crypto/rand"
"encoding/binary"
+ "errors"
"fmt"
mrand "math/rand"
"net"
@@ -383,7 +384,7 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
if tab.enrFilter != nil {
if !tab.enrFilter(n.Record()) {
tab.log.Trace("ENR record filter out", "id", last.ID(), "addr", last.addr())
- err = fmt.Errorf("filtered node")
+ err = errors.New("filtered node")
}
}
last = &node{Node: *n, addedAt: last.addedAt, livenessChecks: last.livenessChecks}
diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go
index c046898e7c..6312821e0d 100644
--- a/p2p/discover/v4_udp.go
+++ b/p2p/discover/v4_udp.go
@@ -365,7 +365,7 @@ func (t *UDPv4) RequestENR(n *enode.Node) (*enode.Node, error) {
return nil, err
}
if respN.ID() != n.ID() {
- return nil, fmt.Errorf("invalid ID in response record")
+ return nil, errors.New("invalid ID in response record")
}
if respN.Seq() < n.Seq() {
return n, nil // response record is older
diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go
index 8b3e33d37c..71f8d8dd08 100644
--- a/p2p/discover/v5_udp.go
+++ b/p2p/discover/v5_udp.go
@@ -442,7 +442,7 @@ func (t *UDPv5) verifyResponseNode(c *callV5, r *enr.Record, distances []uint, s
}
}
if _, ok := seen[node.ID()]; ok {
- return nil, fmt.Errorf("duplicate record")
+ return nil, errors.New("duplicate record")
}
seen[node.ID()] = struct{}{}
return node, nil
diff --git a/p2p/discover/v5wire/encoding.go b/p2p/discover/v5wire/encoding.go
index 5108910620..904a3ddec6 100644
--- a/p2p/discover/v5wire/encoding.go
+++ b/p2p/discover/v5wire/encoding.go
@@ -367,11 +367,11 @@ func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoarey
// key is part of the ID nonce signature.
var remotePubkey = new(ecdsa.PublicKey)
if err := challenge.Node.Load((*enode.Secp256k1)(remotePubkey)); err != nil {
- return nil, nil, fmt.Errorf("can't find secp256k1 key for recipient")
+ return nil, nil, errors.New("can't find secp256k1 key for recipient")
}
ephkey, err := c.sc.ephemeralKeyGen()
if err != nil {
- return nil, nil, fmt.Errorf("can't generate ephemeral key")
+ return nil, nil, errors.New("can't generate ephemeral key")
}
ephpubkey := EncodePubkey(&ephkey.PublicKey)
auth.pubkey = ephpubkey[:]
@@ -395,7 +395,7 @@ func (c *Codec) makeHandshakeAuth(toID enode.ID, addr string, challenge *Whoarey
// Create session keys.
sec := deriveKeys(sha256.New, ephkey, remotePubkey, c.localnode.ID(), challenge.Node.ID(), cdata)
if sec == nil {
- return nil, nil, fmt.Errorf("key derivation failed")
+ return nil, nil, errors.New("key derivation failed")
}
return auth, sec, err
}
diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go
index 8f1c221b80..4f14d860e1 100644
--- a/p2p/dnsdisc/client.go
+++ b/p2p/dnsdisc/client.go
@@ -191,7 +191,7 @@ func (c *Client) resolveEntry(ctx context.Context, domain, hash string) (entry,
func (c *Client) doResolveEntry(ctx context.Context, domain, hash string) (entry, error) {
wantHash, err := b32format.DecodeString(hash)
if err != nil {
- return nil, fmt.Errorf("invalid base32 hash")
+ return nil, errors.New("invalid base32 hash")
}
name := hash + "." + domain
txts, err := c.cfg.Resolver.LookupTXT(ctx, hash+"."+domain)
diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go
index 7d9703a345..dfac4fb372 100644
--- a/p2p/dnsdisc/tree.go
+++ b/p2p/dnsdisc/tree.go
@@ -21,6 +21,7 @@ import (
"crypto/ecdsa"
"encoding/base32"
"encoding/base64"
+ "errors"
"fmt"
"io"
"strings"
@@ -341,7 +342,7 @@ func parseLinkEntry(e string) (entry, error) {
func parseLink(e string) (*linkEntry, error) {
if !strings.HasPrefix(e, linkPrefix) {
- return nil, fmt.Errorf("wrong/missing scheme 'enrtree' in URL")
+ return nil, errors.New("wrong/missing scheme 'enrtree' in URL")
}
e = e[len(linkPrefix):]
diff --git a/p2p/enode/idscheme.go b/p2p/enode/idscheme.go
index fd5d868b76..6ad7f809a7 100644
--- a/p2p/enode/idscheme.go
+++ b/p2p/enode/idscheme.go
@@ -18,7 +18,7 @@ package enode
import (
"crypto/ecdsa"
- "fmt"
+ "errors"
"io"
"github.com/ethereum/go-ethereum/common/math"
@@ -67,7 +67,7 @@ func (V4ID) Verify(r *enr.Record, sig []byte) error {
if err := r.Load(&entry); err != nil {
return err
} else if len(entry) != 33 {
- return fmt.Errorf("invalid public key")
+ return errors.New("invalid public key")
}
h := sha3.NewLegacyKeccak256()
diff --git a/p2p/nat/natpmp.go b/p2p/nat/natpmp.go
index 988e10dd76..387c1dc1cb 100644
--- a/p2p/nat/natpmp.go
+++ b/p2p/nat/natpmp.go
@@ -17,6 +17,7 @@
package nat
import (
+ "errors"
"fmt"
"net"
"strings"
@@ -47,7 +48,7 @@ func (n *pmp) ExternalIP() (net.IP, error) {
func (n *pmp) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) (uint16, error) {
if lifetime <= 0 {
- return 0, fmt.Errorf("lifetime must not be <= 0")
+ return 0, errors.New("lifetime must not be <= 0")
}
// Note order of port arguments is switched between our
// AddMapping and the client's AddPortMapping.
diff --git a/p2p/server.go b/p2p/server.go
index 01e5b68d4f..4d810aed66 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -992,13 +992,13 @@ func (srv *Server) checkInboundConn(remoteIP net.IP) error {
// Reject connections that do not match NetRestrict.
if srv.NetRestrict != nil && !srv.NetRestrict.Contains(remoteIP) {
- return fmt.Errorf("not in netrestrict list")
+ return errors.New("not in netrestrict list")
}
// Reject Internet peers that try too often.
now := srv.clock.Now()
srv.inboundHistory.expire(now, nil)
if !netutil.IsLAN(remoteIP) && srv.inboundHistory.contains(remoteIP.String()) {
- return fmt.Errorf("too many attempts")
+ return errors.New("too many attempts")
}
srv.inboundHistory.add(remoteIP.String(), now.Add(inboundThrottleTime))
return nil
diff --git a/p2p/server_nat.go b/p2p/server_nat.go
index 354597cc7a..299d275490 100644
--- a/p2p/server_nat.go
+++ b/p2p/server_nat.go
@@ -127,7 +127,7 @@ func (srv *Server) portMappingLoop() {
} else if !ip.Equal(lastExtIP) {
log.Debug("External IP changed", "ip", extip, "interface", srv.NAT)
} else {
- return
+ continue
}
// Here, we either failed to get the external IP, or it has changed.
lastExtIP = ip
diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go
index c52917fd0a..349e496b2f 100644
--- a/p2p/simulations/adapters/inproc.go
+++ b/p2p/simulations/adapters/inproc.go
@@ -172,7 +172,7 @@ type SimNode struct {
registerOnce sync.Once
}
-// Close closes the underlaying node.Node to release
+// Close closes the underlying node.Node to release
// acquired resources.
func (sn *SimNode) Close() error {
return sn.node.Close()
diff --git a/p2p/transport.go b/p2p/transport.go
index 1e62167e0f..c8fc3c92aa 100644
--- a/p2p/transport.go
+++ b/p2p/transport.go
@@ -19,6 +19,7 @@ package p2p
import (
"bytes"
"crypto/ecdsa"
+ "errors"
"fmt"
"io"
"net"
@@ -158,7 +159,7 @@ func readProtocolHandshake(rw MsgReader) (*protoHandshake, error) {
return nil, err
}
if msg.Size > baseProtocolMaxMsgSize {
- return nil, fmt.Errorf("message too big")
+ return nil, errors.New("message too big")
}
if msg.Code == discMsg {
// Disconnect before protocol handshake is valid according to the
diff --git a/params/config.go b/params/config.go
index 1e460f0745..9824658f1a 100644
--- a/params/config.go
+++ b/params/config.go
@@ -64,6 +64,7 @@ var (
TerminalTotalDifficulty: MainnetTerminalTotalDifficulty, // 58_750_000_000_000_000_000_000
TerminalTotalDifficultyPassed: true,
ShanghaiTime: newUint64(1681338455),
+ CancunTime: newUint64(1710338135),
Ethash: new(EthashConfig),
}
// SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network.
@@ -951,7 +952,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
lastFork.name, cur.name, cur.block)
} else {
return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled at timestamp %v",
- lastFork.name, cur.name, cur.timestamp)
+ lastFork.name, cur.name, *cur.timestamp)
}
// Fork (whether defined by block or timestamp) must follow the fork definition sequence
@@ -961,7 +962,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
lastFork.name, lastFork.block, cur.name, cur.block)
} else if lastFork.timestamp != nil && *lastFork.timestamp > *cur.timestamp {
return fmt.Errorf("unsupported fork ordering: %v enabled at timestamp %v, but %v enabled at timestamp %v",
- lastFork.name, lastFork.timestamp, cur.name, cur.timestamp)
+ lastFork.name, *lastFork.timestamp, cur.name, *cur.timestamp)
}
// Timestamp based forks can follow block based ones, but not the other way around
@@ -1269,6 +1270,8 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules
if chainID == nil {
chainID = new(big.Int)
}
+ // disallow setting Merge out of order
+ isMerge = isMerge && c.IsLondon(num)
return Rules{
ChainID: new(big.Int).Set(chainID),
IsHomestead: c.IsHomestead(num),
diff --git a/params/version.go b/params/version.go
index 07ea25bcba..a68d4a1279 100644
--- a/params/version.go
+++ b/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 4 // Minor version component of the current release
- VersionPatch = 1 // Patch version component of the current release
+ VersionPatch = 2 // Patch version component of the current release
VersionMeta = "" // Version metadata to append to the version string
)
diff --git a/rpc/http.go b/rpc/http.go
index b96faccc02..14f998995b 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -33,8 +33,8 @@ import (
)
const (
- maxRequestContentLength = 1024 * 1024 * 5
- contentType = "application/json"
+ defaultBodyLimit = 5 * 1024 * 1024
+ contentType = "application/json"
)
// https://www.jsonrpc.org/historical/json-rpc-over-http.html#id13
@@ -253,8 +253,8 @@ type httpServerConn struct {
r *http.Request
}
-func newHTTPServerConn(r *http.Request, w http.ResponseWriter) ServerCodec {
- body := io.LimitReader(r.Body, maxRequestContentLength)
+func (s *Server) newHTTPServerConn(r *http.Request, w http.ResponseWriter) ServerCodec {
+ body := io.LimitReader(r.Body, int64(s.httpBodyLimit))
conn := &httpServerConn{Reader: body, Writer: w, r: r}
encoder := func(v any, isErrorResponse bool) error {
@@ -312,7 +312,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
return
}
- if code, err := validateRequest(r); err != nil {
+ if code, err := s.validateRequest(r); err != nil {
http.Error(w, err.Error(), code)
return
}
@@ -343,19 +343,19 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
w.Header().Set("content-type", contentType)
- codec := newHTTPServerConn(r, w)
+ codec := s.newHTTPServerConn(r, w)
defer codec.close()
s.serveSingleRequest(ctx, codec)
}
// validateRequest returns a non-zero response code and error message if the
// request is invalid.
-func validateRequest(r *http.Request) (int, error) {
+func (s *Server) validateRequest(r *http.Request) (int, error) {
if r.Method == http.MethodPut || r.Method == http.MethodDelete {
return http.StatusMethodNotAllowed, errors.New("method not allowed")
}
- if r.ContentLength > maxRequestContentLength {
- err := fmt.Errorf("content length too large (%d>%d)", r.ContentLength, maxRequestContentLength)
+ if r.ContentLength > int64(s.httpBodyLimit) {
+ err := fmt.Errorf("content length too large (%d>%d)", r.ContentLength, s.httpBodyLimit)
return http.StatusRequestEntityTooLarge, err
}
// Allow OPTIONS (regardless of content-type)
diff --git a/rpc/http_test.go b/rpc/http_test.go
index 584842a9aa..ad86ca15ae 100644
--- a/rpc/http_test.go
+++ b/rpc/http_test.go
@@ -40,11 +40,13 @@ func confirmStatusCode(t *testing.T, got, want int) {
func confirmRequestValidationCode(t *testing.T, method, contentType, body string, expectedStatusCode int) {
t.Helper()
+
+ s := NewServer()
request := httptest.NewRequest(method, "http://url.com", strings.NewReader(body))
if len(contentType) > 0 {
request.Header.Set("Content-Type", contentType)
}
- code, err := validateRequest(request)
+ code, err := s.validateRequest(request)
if code == 0 {
if err != nil {
t.Errorf("validation: got error %v, expected nil", err)
@@ -64,7 +66,7 @@ func TestHTTPErrorResponseWithPut(t *testing.T) {
}
func TestHTTPErrorResponseWithMaxContentLength(t *testing.T) {
- body := make([]rune, maxRequestContentLength+1)
+ body := make([]rune, defaultBodyLimit+1)
confirmRequestValidationCode(t,
http.MethodPost, contentType, string(body), http.StatusRequestEntityTooLarge)
}
@@ -104,7 +106,7 @@ func TestHTTPResponseWithEmptyGet(t *testing.T) {
// This checks that maxRequestContentLength is not applied to the response of a request.
func TestHTTPRespBodyUnlimited(t *testing.T) {
- const respLength = maxRequestContentLength * 3
+ const respLength = defaultBodyLimit * 3
s := NewServer()
defer s.Stop()
diff --git a/rpc/server.go b/rpc/server.go
index bdf5042cd0..bea7bd9e9b 100644
--- a/rpc/server.go
+++ b/rpc/server.go
@@ -51,13 +51,15 @@ type Server struct {
run atomic.Bool
batchItemLimit int
batchResponseLimit int
+ httpBodyLimit int
}
// NewServer creates a new server instance with no registered handlers.
func NewServer() *Server {
server := &Server{
- idgen: randomIDGenerator(),
- codecs: make(map[ServerCodec]struct{}),
+ idgen: randomIDGenerator(),
+ codecs: make(map[ServerCodec]struct{}),
+ httpBodyLimit: defaultBodyLimit,
}
server.run.Store(true)
// Register the default service providing meta information about the RPC service such
@@ -78,6 +80,13 @@ func (s *Server) SetBatchLimits(itemLimit, maxResponseSize int) {
s.batchResponseLimit = maxResponseSize
}
+// SetHTTPBodyLimit sets the size limit for HTTP requests.
+//
+// This method should be called before processing any requests via ServeHTTP.
+func (s *Server) SetHTTPBodyLimit(limit int) {
+ s.httpBodyLimit = limit
+}
+
// RegisterName creates a service for the given receiver type under the given name. When no
// methods on the given receiver match the criteria to be either a RPC method or a
// subscription an error is returned. Otherwise a new service is created and added to the
diff --git a/rpc/types.go b/rpc/types.go
index f88c37c59d..d124081786 100644
--- a/rpc/types.go
+++ b/rpc/types.go
@@ -19,6 +19,7 @@ package rpc
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"math"
"strings"
@@ -104,7 +105,7 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error {
return err
}
if blckNum > math.MaxInt64 {
- return fmt.Errorf("block number larger than int64")
+ return errors.New("block number larger than int64")
}
*bn = BlockNumber(blckNum)
return nil
@@ -154,7 +155,7 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, &e)
if err == nil {
if e.BlockNumber != nil && e.BlockHash != nil {
- return fmt.Errorf("cannot specify both BlockHash and BlockNumber, choose one or the other")
+ return errors.New("cannot specify both BlockHash and BlockNumber, choose one or the other")
}
bnh.BlockNumber = e.BlockNumber
bnh.BlockHash = e.BlockHash
@@ -202,7 +203,7 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error {
return err
}
if blckNum > math.MaxInt64 {
- return fmt.Errorf("blocknumber too high")
+ return errors.New("blocknumber too high")
}
bn := BlockNumber(blckNum)
bnh.BlockNumber = &bn
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
index d3e15d94c9..8d2bd9d802 100644
--- a/rpc/websocket_test.go
+++ b/rpc/websocket_test.go
@@ -97,7 +97,7 @@ func TestWebsocketLargeCall(t *testing.T) {
// This call sends slightly less than the limit and should work.
var result echoResult
- arg := strings.Repeat("x", maxRequestContentLength-200)
+ arg := strings.Repeat("x", defaultBodyLimit-200)
if err := client.Call(&result, "test_echo", arg, 1); err != nil {
t.Fatalf("valid call didn't work: %v", err)
}
@@ -106,7 +106,7 @@ func TestWebsocketLargeCall(t *testing.T) {
}
// This call sends twice the allowed size and shouldn't work.
- arg = strings.Repeat("x", maxRequestContentLength*2)
+ arg = strings.Repeat("x", defaultBodyLimit*2)
err = client.Call(&result, "test_echo", arg)
if err == nil {
t.Fatal("no error for too large call")
diff --git a/signer/core/api.go b/signer/core/api.go
index ef8c136625..a32f24cb18 100644
--- a/signer/core/api.go
+++ b/signer/core/api.go
@@ -631,7 +631,7 @@ func (api *SignerAPI) SignGnosisSafeTx(ctx context.Context, signerAddress common
}
}
typedData := gnosisTx.ToTypedData()
- // might aswell error early.
+ // might as well error early.
// we are expected to sign. If our calculated hash does not match what they want,
// The gnosis safetx input contains a 'safeTxHash' which is the expected safeTxHash that
sighash, _, err := apitypes.TypedDataAndHash(typedData)
diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go
index 64279ce027..af2c2cb723 100644
--- a/signer/core/apitypes/types.go
+++ b/signer/core/apitypes/types.go
@@ -712,7 +712,7 @@ func formatPrimitiveValue(encType string, encValue interface{}) (string, error)
func (t Types) validate() error {
for typeKey, typeArr := range t {
if len(typeKey) == 0 {
- return fmt.Errorf("empty type key")
+ return errors.New("empty type key")
}
for i, typeObj := range typeArr {
if len(typeObj.Type) == 0 {
diff --git a/signer/core/signed_data.go b/signer/core/signed_data.go
index 6f10da6553..5312f87516 100644
--- a/signer/core/signed_data.go
+++ b/signer/core/signed_data.go
@@ -349,7 +349,7 @@ func (api *SignerAPI) EcRecover(ctx context.Context, data hexutil.Bytes, sig hex
// Note, the signature must conform to the secp256k1 curve R, S and V values, where
// the V value must be 27 or 28 for legacy reasons.
//
- // https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover
+ // https://geth.ethereum.org/docs/tools/clef/apis#account-ecrecover
if len(sig) != 65 {
return common.Address{}, errors.New("signature must be 65 bytes long")
}
diff --git a/tests/block_test.go b/tests/block_test.go
index 77681716d6..5d6669e9d4 100644
--- a/tests/block_test.go
+++ b/tests/block_test.go
@@ -62,14 +62,14 @@ func TestBlockchain(t *testing.T) {
// which run natively, so there's no reason to run them here.
}
-// TestExecutionSpec runs the test fixtures from execution-spec-tests.
-func TestExecutionSpec(t *testing.T) {
- if !common.FileExist(executionSpecDir) {
- t.Skipf("directory %s does not exist", executionSpecDir)
+// TestExecutionSpecBlocktests runs the test fixtures from execution-spec-tests.
+func TestExecutionSpecBlocktests(t *testing.T) {
+ if !common.FileExist(executionSpecBlockchainTestDir) {
+ t.Skipf("directory %s does not exist", executionSpecBlockchainTestDir)
}
bt := new(testMatcher)
- bt.walk(t, executionSpecDir, func(t *testing.T, name string, test *BlockTest) {
+ bt.walk(t, executionSpecBlockchainTestDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test)
})
}
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index 50adee3e0b..52cbf1e1ac 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -39,9 +39,9 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
)
// A BlockTest checks handling of entire blocks.
@@ -57,8 +57,8 @@ func (t *BlockTest) UnmarshalJSON(in []byte) error {
type btJSON struct {
Blocks []btBlock `json:"blocks"`
Genesis btHeader `json:"genesisBlockHeader"`
- Pre core.GenesisAlloc `json:"pre"`
- Post core.GenesisAlloc `json:"postState"`
+ Pre types.GenesisAlloc `json:"pre"`
+ Post types.GenesisAlloc `json:"postState"`
BestBlock common.UnprefixedHash `json:"lastblockhash"`
Network string `json:"network"`
SealEngine string `json:"sealEngine"`
@@ -117,7 +117,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger, po
// import pre accounts & construct test genesis block & state root
var (
db = rawdb.NewMemoryDatabase()
- tconf = &trie.Config{
+ tconf = &triedb.Config{
Preimages: true,
}
)
@@ -128,7 +128,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger, po
}
// Commit genesis state
gspec := t.genesis(config)
- triedb := trie.NewDatabase(db, tconf)
+ triedb := triedb.NewDatabase(db, tconf)
gblock, err := gspec.Commit(db, triedb)
if err != nil {
return err
diff --git a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
index 6b5ca90880..dcafebb265 100644
--- a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
+++ b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/triedb"
"golang.org/x/exp/slices"
)
@@ -56,7 +57,7 @@ func (f *fuzzer) readInt() uint64 {
}
func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) {
- trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := trie.NewEmpty(triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil))
vals := make(map[string]*kv)
size := f.readInt()
// Fill it with some fluff
diff --git a/tests/init_test.go b/tests/init_test.go
index 25511fcc07..f9b6025d54 100644
--- a/tests/init_test.go
+++ b/tests/init_test.go
@@ -34,15 +34,16 @@ import (
)
var (
- baseDir = filepath.Join(".", "testdata")
- blockTestDir = filepath.Join(baseDir, "BlockchainTests")
- stateTestDir = filepath.Join(baseDir, "GeneralStateTests")
- legacyStateTestDir = filepath.Join(baseDir, "LegacyTests", "Constantinople", "GeneralStateTests")
- transactionTestDir = filepath.Join(baseDir, "TransactionTests")
- rlpTestDir = filepath.Join(baseDir, "RLPTests")
- difficultyTestDir = filepath.Join(baseDir, "BasicTests")
- executionSpecDir = filepath.Join(".", "spec-tests", "fixtures")
- benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks")
+ baseDir = filepath.Join(".", "testdata")
+ blockTestDir = filepath.Join(baseDir, "BlockchainTests")
+ stateTestDir = filepath.Join(baseDir, "GeneralStateTests")
+ legacyStateTestDir = filepath.Join(baseDir, "LegacyTests", "Constantinople", "GeneralStateTests")
+ transactionTestDir = filepath.Join(baseDir, "TransactionTests")
+ rlpTestDir = filepath.Join(baseDir, "RLPTests")
+ difficultyTestDir = filepath.Join(baseDir, "BasicTests")
+ executionSpecBlockchainTestDir = filepath.Join(".", "spec-tests", "fixtures", "blockchain_tests")
+ executionSpecStateTestDir = filepath.Join(".", "spec-tests", "fixtures", "state_tests")
+ benchmarksDir = filepath.Join(".", "evm-benchmarks", "benchmarks")
)
func readJSON(reader io.Reader, value interface{}) error {
diff --git a/tests/state_test.go b/tests/state_test.go
index 18d8a0b50e..1d749d8bcf 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -30,20 +30,16 @@ import (
"testing"
"time"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
- "github.com/ethereum/go-ethereum/core/state"
- "github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/holiman/uint256"
)
-func TestState(t *testing.T) {
- t.Parallel()
-
- st := new(testMatcher)
+func initMatcher(st *testMatcher) {
// Long tests:
st.slow(`^stAttackTest/ContractCreationSpam`)
st.slow(`^stBadOpcode/badOpcodes`)
@@ -62,76 +58,102 @@ func TestState(t *testing.T) {
// Broken tests:
// EOF is not part of cancun
st.skipLoad(`^stEOF/`)
+}
- // For Istanbul, older tests were moved into LegacyTests
+func TestState(t *testing.T) {
+ t.Parallel()
+
+ st := new(testMatcher)
+ initMatcher(st)
for _, dir := range []string{
filepath.Join(baseDir, "EIPTests", "StateTests"),
stateTestDir,
- legacyStateTestDir,
benchmarksDir,
} {
st.walk(t, dir, func(t *testing.T, name string, test *StateTest) {
- if runtime.GOARCH == "386" && runtime.GOOS == "windows" && rand.Int63()%2 == 0 {
- t.Skip("test (randomly) skipped on 32-bit windows")
- return
- }
- for _, subtest := range test.Subtests() {
- // TODO(Nathan): fix before enable Cancun
- if subtest.Fork == "Cancun" {
- return
- }
- subtest := subtest
- key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
+ execStateTest(t, st, test)
+ })
+ }
+}
- t.Run(key+"/hash/trie", func(t *testing.T) {
- withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- var result error
- test.Run(subtest, vmconfig, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
- result = st.checkFailure(t, err)
- })
- return result
- })
+// TestLegacyState tests some older tests, which were moved to the folder
+// 'LegacyTests' for the Istanbul fork.
+func TestLegacyState(t *testing.T) {
+ st := new(testMatcher)
+ initMatcher(st)
+ st.walk(t, legacyStateTestDir, func(t *testing.T, name string, test *StateTest) {
+ execStateTest(t, st, test)
+ })
+}
+
+// TestExecutionSpecState runs the test fixtures from execution-spec-tests.
+func TestExecutionSpecState(t *testing.T) {
+ if !common.FileExist(executionSpecStateTestDir) {
+ t.Skipf("directory %s does not exist", executionSpecStateTestDir)
+ }
+ st := new(testMatcher)
+
+ st.walk(t, executionSpecStateTestDir, func(t *testing.T, name string, test *StateTest) {
+ execStateTest(t, st, test)
+ })
+}
+
+func execStateTest(t *testing.T, st *testMatcher, test *StateTest) {
+ if runtime.GOARCH == "386" && runtime.GOOS == "windows" && rand.Int63()%2 == 0 {
+ t.Skip("test (randomly) skipped on 32-bit windows")
+ return
+ }
+ for _, subtest := range test.Subtests() {
+ subtest := subtest
+ key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
+
+ t.Run(key+"/hash/trie", func(t *testing.T) {
+ withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
+ var result error
+ test.Run(subtest, vmconfig, false, rawdb.HashScheme, func(err error, state *StateTestState) {
+ result = st.checkFailure(t, err)
})
- t.Run(key+"/hash/snap", func(t *testing.T) {
- withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- var result error
- test.Run(subtest, vmconfig, true, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
- if snaps != nil && state != nil {
- if _, err := snaps.Journal(state.IntermediateRoot(false)); err != nil {
- result = err
- return
- }
- }
- result = st.checkFailure(t, err)
- })
- return result
- })
+ return result
+ })
+ })
+ t.Run(key+"/hash/snap", func(t *testing.T) {
+ withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
+ var result error
+ test.Run(subtest, vmconfig, true, rawdb.HashScheme, func(err error, state *StateTestState) {
+ if state.Snapshots != nil && state.StateDB != nil {
+ if _, err := state.Snapshots.Journal(state.StateDB.IntermediateRoot(false)); err != nil {
+ result = err
+ return
+ }
+ }
+ result = st.checkFailure(t, err)
})
- t.Run(key+"/path/trie", func(t *testing.T) {
- withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- var result error
- test.Run(subtest, vmconfig, false, rawdb.PathScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
- result = st.checkFailure(t, err)
- })
- return result
- })
+ return result
+ })
+ })
+ t.Run(key+"/path/trie", func(t *testing.T) {
+ withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
+ var result error
+ test.Run(subtest, vmconfig, false, rawdb.PathScheme, func(err error, state *StateTestState) {
+ result = st.checkFailure(t, err)
})
- t.Run(key+"/path/snap", func(t *testing.T) {
- withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
- var result error
- test.Run(subtest, vmconfig, true, rawdb.PathScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
- if snaps != nil && state != nil {
- if _, err := snaps.Journal(state.IntermediateRoot(false)); err != nil {
- result = err
- return
- }
- }
- result = st.checkFailure(t, err)
- })
- return result
- })
+ return result
+ })
+ })
+ t.Run(key+"/path/snap", func(t *testing.T) {
+ withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
+ var result error
+ test.Run(subtest, vmconfig, true, rawdb.PathScheme, func(err error, state *StateTestState) {
+ if state.Snapshots != nil && state.StateDB != nil {
+ if _, err := state.Snapshots.Journal(state.StateDB.IntermediateRoot(false)); err != nil {
+ result = err
+ return
+ }
+ }
+ result = st.checkFailure(t, err)
})
- }
+ return result
+ })
})
}
}
@@ -226,8 +248,8 @@ func runBenchmark(b *testing.B, t *StateTest) {
vmconfig.ExtraEips = eips
block := t.genesis(config).ToBlock()
- triedb, _, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false, rawdb.HashScheme)
- defer triedb.Close()
+ state := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false, rawdb.HashScheme)
+ defer state.Close()
var baseFee *big.Int
if rules.IsLondon {
@@ -265,7 +287,7 @@ func runBenchmark(b *testing.B, t *StateTest) {
context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase)
context.GetHash = vmTestBlockHash
context.BaseFee = baseFee
- evm := vm.NewEVM(context, txContext, statedb, config, vmconfig)
+ evm := vm.NewEVM(context, txContext, state.StateDB, config, vmconfig)
// Create "contract" for sender to cache code analysis.
sender := vm.NewContract(vm.AccountRef(msg.From), vm.AccountRef(msg.From),
@@ -278,8 +300,8 @@ func runBenchmark(b *testing.B, t *StateTest) {
)
b.ResetTimer()
for n := 0; n < b.N; n++ {
- snapshot := statedb.Snapshot()
- statedb.Prepare(rules, msg.From, context.Coinbase, msg.To, vm.ActivePrecompiles(rules), msg.AccessList)
+ snapshot := state.StateDB.Snapshot()
+ state.StateDB.Prepare(rules, msg.From, context.Coinbase, msg.To, vm.ActivePrecompiles(rules), msg.AccessList)
b.StartTimer()
start := time.Now()
@@ -292,10 +314,10 @@ func runBenchmark(b *testing.B, t *StateTest) {
b.StopTimer()
elapsed += uint64(time.Since(start))
- refund += statedb.GetRefund()
+ refund += state.StateDB.GetRefund()
gasUsed += msg.GasLimit - leftOverGas
- statedb.RevertToSnapshot(snapshot)
+ state.StateDB.RevertToSnapshot(snapshot)
}
if elapsed < 1 {
elapsed = 1
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 7253ec540a..5f1694ae7b 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -39,9 +39,9 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
- "github.com/ethereum/go-ethereum/trie"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/triedb"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3"
)
@@ -64,7 +64,7 @@ func (t *StateTest) UnmarshalJSON(in []byte) error {
type stJSON struct {
Env stEnv `json:"env"`
- Pre core.GenesisAlloc `json:"pre"`
+ Pre types.GenesisAlloc `json:"pre"`
Tx stTransaction `json:"transaction"`
Out hexutil.Bytes `json:"out"`
Post map[string][]stPostState `json:"post"`
@@ -194,20 +194,14 @@ func (t *StateTest) checkError(subtest StateSubtest, err error) error {
}
// Run executes a specific subtest and verifies the post-state and logs
-func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string, postCheck func(err error, snaps *snapshot.Tree, state *state.StateDB)) (result error) {
- triedb, snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter, scheme)
-
+func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string, postCheck func(err error, st *StateTestState)) (result error) {
+ st, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter, scheme)
// Invoke the callback at the end of function for further analysis.
defer func() {
- postCheck(result, snaps, statedb)
-
- if triedb != nil {
- triedb.Close()
- }
- if snaps != nil {
- snaps.Release()
- }
+ postCheck(result, &st)
+ st.Close()
}()
+
checkedErr := t.checkError(subtest, err)
if checkedErr != nil {
return checkedErr
@@ -224,23 +218,24 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bo
if root != common.Hash(post.Root) {
return fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root)
}
- if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) {
+ if logs := rlpHash(st.StateDB.Logs()); logs != common.Hash(post.Logs) {
return fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs)
}
- statedb, _ = state.New(root, statedb.Database(), snaps)
+ st.StateDB, _ = state.New(root, st.StateDB.Database(), st.Snapshots)
return nil
}
-// RunNoVerify runs a specific subtest and returns the statedb and post-state root
-func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB, common.Hash, error) {
+// RunNoVerify runs a specific subtest and returns the statedb and post-state root.
+// Remember to call state.Close after verifying the test result!
+func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string) (state StateTestState, root common.Hash, err error) {
config, eips, err := GetChainConfig(subtest.Fork)
if err != nil {
- return nil, nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
+ return state, common.Hash{}, UnsupportedForkError{subtest.Fork}
}
vmconfig.ExtraEips = eips
block := t.genesis(config).ToBlock()
- triedb, snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter, scheme)
+ state = MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter, scheme)
var baseFee *big.Int
if config.IsLondon(new(big.Int)) {
@@ -254,8 +249,18 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
post := t.json.Post[subtest.Fork][subtest.Index]
msg, err := t.json.Tx.toMessage(post, baseFee)
if err != nil {
- triedb.Close()
- return nil, nil, nil, common.Hash{}, err
+ return state, common.Hash{}, err
+ }
+
+ { // Blob transactions may be present after the Cancun fork.
+ // In production,
+ // - the header is verified against the max in eip4844.go:VerifyEIP4844Header
+ // - the block body is verified against the header in block_validator.go:ValidateBody
+ // Here, we just do this shortcut smaller fix, since state tests do not
+ // utilize those codepaths
+ if len(msg.BlobHashes)*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock {
+ return state, common.Hash{}, errors.New("blob gas exceeds maximum")
+ }
}
// Try to recover tx with current signer
@@ -263,13 +268,10 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
var ttx types.Transaction
err := ttx.UnmarshalBinary(post.TxBytes)
if err != nil {
- triedb.Close()
- return nil, nil, nil, common.Hash{}, err
+ return state, common.Hash{}, err
}
-
if _, err := types.Sender(types.LatestSigner(config), &ttx); err != nil {
- triedb.Close()
- return nil, nil, nil, common.Hash{}, err
+ return state, common.Hash{}, err
}
}
@@ -290,26 +292,15 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
if config.IsCancun(new(big.Int), block.Time()) && t.json.Env.ExcessBlobGas != nil {
context.BlobBaseFee = eip4844.CalcBlobFee(*t.json.Env.ExcessBlobGas)
}
- evm := vm.NewEVM(context, txContext, statedb, config, vmconfig)
-
- { // Blob transactions may be present after the Cancun fork.
- // In production,
- // - the header is verified against the max in eip4844.go:VerifyEIP4844Header
- // - the block body is verified against the header in block_validator.go:ValidateBody
- // Here, we just do this shortcut smaller fix, since state tests do not
- // utilize those codepaths
- if len(msg.BlobHashes)*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock {
- return nil, nil, nil, common.Hash{}, errors.New("blob gas exceeds maximum")
- }
- }
+ evm := vm.NewEVM(context, txContext, state.StateDB, config, vmconfig)
// Execute the message.
- snapshot := statedb.Snapshot()
+ snapshot := state.StateDB.Snapshot()
gaspool := new(core.GasPool)
gaspool.AddGas(block.GasLimit())
_, err = core.ApplyMessage(evm, msg, gaspool)
if err != nil {
- statedb.RevertToSnapshot(snapshot)
+ state.StateDB.RevertToSnapshot(snapshot)
}
// Commit block
@@ -318,55 +309,18 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
// - the coinbase self-destructed, or
// - there are only 'bad' transactions, which aren't executed. In those cases,
// the coinbase gets no txfee, so isn't created, and thus needs to be touched
- statedb.AddBalance(block.Coinbase(), new(uint256.Int))
+ state.StateDB.AddBalance(block.Coinbase(), new(uint256.Int))
// And _now_ get the state root
- root := statedb.IntermediateRoot(config.IsEIP158(block.Number()))
- statedb.SetExpectedStateRoot(root)
- root, _, _ = statedb.Commit(block.NumberU64(), nil)
- return triedb, snaps, statedb, root, err
+ root = state.StateDB.IntermediateRoot(config.IsEIP158(block.Number()))
+ state.StateDB.SetExpectedStateRoot(root)
+ root, _, _ = state.StateDB.Commit(block.NumberU64(), nil)
+ return state, root, err
}
func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]
}
-func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB) {
- tconf := &trie.Config{Preimages: true}
- if scheme == rawdb.HashScheme {
- tconf.HashDB = hashdb.Defaults
- } else {
- tconf.PathDB = pathdb.Defaults
- }
- triedb := trie.NewDatabase(db, tconf)
- sdb := state.NewDatabaseWithNodeDB(db, triedb)
- statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
- for addr, a := range accounts {
- statedb.SetCode(addr, a.Code)
- statedb.SetNonce(addr, a.Nonce)
- statedb.SetBalance(addr, uint256.MustFromBig(a.Balance))
- for k, v := range a.Storage {
- statedb.SetState(addr, k, v)
- }
- }
- // Commit and re-open to start with a clean state.
- statedb.Finalise(false)
- statedb.AccountsIntermediateRoot()
- root, _, _ := statedb.Commit(0, nil)
-
- var snaps *snapshot.Tree
- if snapshotter {
- snapconfig := snapshot.Config{
- CacheSize: 1,
- Recovery: false,
- NoBuild: false,
- AsyncBuild: false,
- }
- snaps, _ = snapshot.New(snapconfig, db, triedb, root, 128, false)
- }
- statedb, _ = state.New(root, sdb, snaps)
- return triedb, snaps, statedb
-}
-
func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis {
genesis := &core.Genesis{
Config: config,
@@ -483,3 +437,64 @@ func rlpHash(x interface{}) (h common.Hash) {
func vmTestBlockHash(n uint64) common.Hash {
return common.BytesToHash(crypto.Keccak256([]byte(big.NewInt(int64(n)).String())))
}
+
+// StateTestState groups all the state database objects together for use in tests.
+type StateTestState struct {
+ StateDB *state.StateDB
+ TrieDB *triedb.Database
+ Snapshots *snapshot.Tree
+}
+
+// MakePreState creates a state containing the given allocation.
+func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bool, scheme string) StateTestState {
+ tconf := &triedb.Config{Preimages: true}
+ if scheme == rawdb.HashScheme {
+ tconf.HashDB = hashdb.Defaults
+ } else {
+ tconf.PathDB = pathdb.Defaults
+ }
+ triedb := triedb.NewDatabase(db, tconf)
+ sdb := state.NewDatabaseWithNodeDB(db, triedb)
+ statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
+ for addr, a := range accounts {
+ statedb.SetCode(addr, a.Code)
+ statedb.SetNonce(addr, a.Nonce)
+ statedb.SetBalance(addr, uint256.MustFromBig(a.Balance))
+ for k, v := range a.Storage {
+ statedb.SetState(addr, k, v)
+ }
+ }
+
+ // Commit and re-open to start with a clean state.
+ root := statedb.IntermediateRoot(false)
+ statedb.SetExpectedStateRoot(root)
+ root, _, _ = statedb.Commit(0, nil)
+
+ // If snapshot is requested, initialize the snapshotter and use it in state.
+ var snaps *snapshot.Tree
+ if snapshotter {
+ snapconfig := snapshot.Config{
+ CacheSize: 1,
+ Recovery: false,
+ NoBuild: false,
+ AsyncBuild: false,
+ }
+ snaps, _ = snapshot.New(snapconfig, db, triedb, root, 128, false)
+ }
+ statedb, _ = state.New(root, sdb, snaps)
+ return StateTestState{statedb, triedb, snaps}
+}
+
+// Close should be called when the state is no longer needed, ie. after running the test.
+func (st *StateTestState) Close() {
+ if st.TrieDB != nil {
+ st.TrieDB.Close()
+ st.TrieDB = nil
+ }
+ if st.Snapshots != nil {
+ // Need to call Disable here to quit the snapshot generator goroutine.
+ st.Snapshots.Disable()
+ st.Snapshots.Release()
+ st.Snapshots = nil
+ }
+}
diff --git a/trie/committer.go b/trie/committer.go
index 4b222f9710..7de13242b2 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -183,12 +183,12 @@ func estimateSize(n node) int {
}
}
-// mptResolver the children resolver in merkle-patricia-tree.
-type mptResolver struct{}
+// MerkleResolver the children resolver in merkle-patricia-tree.
+type MerkleResolver struct{}
// ForEach implements childResolver, decodes the provided node and
// traverses the children inside.
-func (resolver mptResolver) ForEach(node []byte, onChild func(common.Hash)) {
+func (resolver MerkleResolver) ForEach(node []byte, onChild func(common.Hash)) {
forGatherChildren(mustDecodeNodeUnsafe(nil, node), onChild)
}
diff --git a/trie/database_test.go b/trie/database_test.go
index d508c65533..aed508b368 100644
--- a/trie/database_test.go
+++ b/trie/database_test.go
@@ -17,24 +17,136 @@
package trie
import (
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/triedb/database"
)
-// newTestDatabase initializes the trie database with specified scheme.
-func newTestDatabase(diskdb ethdb.Database, scheme string) *Database {
- config := &Config{Preimages: false}
- if scheme == rawdb.HashScheme {
- config.HashDB = &hashdb.Config{
- CleanCacheSize: 0,
- } // disable clean cache
- } else {
- config.PathDB = &pathdb.Config{
- CleanCacheSize: 0,
- DirtyCacheSize: 0,
- } // disable clean/dirty cache
- }
- return NewDatabase(diskdb, config)
+// testReader implements database.Reader interface, providing function to
+// access trie nodes.
+type testReader struct {
+ db ethdb.Database
+ scheme string
+ nodes []*trienode.MergedNodeSet // sorted from new to old
+}
+
+// Node implements database.Reader interface, retrieving trie node with
+// all available cached layers.
+func (r *testReader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
+ // Check the node presence with the cached layer, from latest to oldest.
+ for _, nodes := range r.nodes {
+ if _, ok := nodes.Sets[owner]; !ok {
+ continue
+ }
+ n, ok := nodes.Sets[owner].Nodes[string(path)]
+ if !ok {
+ continue
+ }
+ if n.IsDeleted() || n.Hash != hash {
+ return nil, &MissingNodeError{Owner: owner, Path: path, NodeHash: hash}
+ }
+ return n.Blob, nil
+ }
+ // Check the node presence in database.
+ return rawdb.ReadTrieNode(r.db, owner, path, hash, r.scheme), nil
+}
+
+// testDb implements database.Database interface, using for testing purpose.
+type testDb struct {
+ disk ethdb.Database
+ root common.Hash
+ scheme string
+ nodes map[common.Hash]*trienode.MergedNodeSet
+ parents map[common.Hash]common.Hash
+}
+
+func newTestDatabase(diskdb ethdb.Database, scheme string) *testDb {
+ return &testDb{
+ disk: diskdb,
+ root: types.EmptyRootHash,
+ scheme: scheme,
+ nodes: make(map[common.Hash]*trienode.MergedNodeSet),
+ parents: make(map[common.Hash]common.Hash),
+ }
+}
+
+func (db *testDb) Reader(stateRoot common.Hash) (database.Reader, error) {
+ nodes, _ := db.dirties(stateRoot, true)
+ return &testReader{db: db.disk, scheme: db.scheme, nodes: nodes}, nil
+}
+
+func (db *testDb) Preimage(hash common.Hash) []byte {
+ return rawdb.ReadPreimage(db.disk, hash)
+}
+
+func (db *testDb) InsertPreimage(preimages map[common.Hash][]byte) {
+ rawdb.WritePreimages(db.disk, preimages)
+}
+
+func (db *testDb) Scheme() string { return db.scheme }
+
+func (db *testDb) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
+ if root == parent {
+ return nil
+ }
+ if _, ok := db.nodes[root]; ok {
+ return nil
+ }
+ db.parents[root] = parent
+ db.nodes[root] = nodes
+ return nil
+}
+
+func (db *testDb) dirties(root common.Hash, topToBottom bool) ([]*trienode.MergedNodeSet, []common.Hash) {
+ var (
+ pending []*trienode.MergedNodeSet
+ roots []common.Hash
+ )
+ for {
+ if root == db.root {
+ break
+ }
+ nodes, ok := db.nodes[root]
+ if !ok {
+ break
+ }
+ if topToBottom {
+ pending = append(pending, nodes)
+ roots = append(roots, root)
+ } else {
+ pending = append([]*trienode.MergedNodeSet{nodes}, pending...)
+ roots = append([]common.Hash{root}, roots...)
+ }
+ root = db.parents[root]
+ }
+ return pending, roots
+}
+
+func (db *testDb) Commit(root common.Hash) error {
+ if root == db.root {
+ return nil
+ }
+ pending, roots := db.dirties(root, false)
+ for i, nodes := range pending {
+ for owner, set := range nodes.Sets {
+ if owner == (common.Hash{}) {
+ continue
+ }
+ set.ForEachWithOrder(func(path string, n *trienode.Node) {
+ rawdb.WriteTrieNode(db.disk, owner, []byte(path), n.Hash, n.Blob, db.scheme)
+ })
+ }
+ nodes.Sets[common.Hash{}].ForEachWithOrder(func(path string, n *trienode.Node) {
+ rawdb.WriteTrieNode(db.disk, common.Hash{}, []byte(path), n.Hash, n.Blob, db.scheme)
+ })
+ db.root = roots[i]
+ }
+ for _, root := range roots {
+ delete(db.nodes, root)
+ delete(db.parents, root)
+ }
+ return nil
}
diff --git a/trie/hbss2pbss.go b/trie/hbss2pbss.go
index 1faaf34783..be7efa3a92 100644
--- a/trie/hbss2pbss.go
+++ b/trie/hbss2pbss.go
@@ -19,7 +19,7 @@ import (
type Hbss2Pbss struct {
trie *Trie // traverse trie
- db *Database
+ db Database
blocknum uint64
root node // root of triedb
stateRootHash common.Hash
@@ -33,7 +33,7 @@ const (
)
// NewHbss2Pbss return a hash2Path obj
-func NewHbss2Pbss(tr *Trie, db *Database, stateRootHash common.Hash, blocknum uint64, jobnum uint64) (*Hbss2Pbss, error) {
+func NewHbss2Pbss(tr *Trie, db Database, stateRootHash common.Hash, blocknum uint64, jobnum uint64) (*Hbss2Pbss, error) {
if tr == nil {
return nil, errors.New("trie is nil")
}
@@ -68,10 +68,10 @@ func (t *Trie) resloveWithoutTrack(n node, prefix []byte) (node, error) {
func (h2p *Hbss2Pbss) writeNode(pathKey []byte, n *trienode.Node, owner common.Hash) {
if owner == (common.Hash{}) {
- rawdb.WriteAccountTrieNode(h2p.db.diskdb, pathKey, n.Blob)
+ rawdb.WriteAccountTrieNode(h2p.db.DiskDB(), pathKey, n.Blob)
log.Debug("WriteNodes account node, ", "path: ", common.Bytes2Hex(pathKey), "Hash: ", n.Hash, "BlobHash: ", crypto.Keccak256Hash(n.Blob))
} else {
- rawdb.WriteStorageTrieNode(h2p.db.diskdb, owner, pathKey, n.Blob)
+ rawdb.WriteStorageTrieNode(h2p.db.DiskDB(), owner, pathKey, n.Blob)
log.Debug("WriteNodes storage node, ", "path: ", common.Bytes2Hex(pathKey), "owner: ", owner.String(), "Hash: ", n.Hash, "BlobHash: ", crypto.Keccak256Hash(n.Blob))
}
}
@@ -85,8 +85,8 @@ func (h2p *Hbss2Pbss) Run() {
log.Info("Total", "complete", h2p.totalNum, "go routines Num", runtime.NumGoroutine, "h2p concurrentQueue", len(h2p.concurrentQueue))
- rawdb.WritePersistentStateID(h2p.db.diskdb, h2p.blocknum)
- rawdb.WriteStateID(h2p.db.diskdb, h2p.stateRootHash, h2p.blocknum)
+ rawdb.WritePersistentStateID(h2p.db.DiskDB(), h2p.blocknum)
+ rawdb.WriteStateID(h2p.db.DiskDB(), h2p.stateRootHash, h2p.blocknum)
}
func (h2p *Hbss2Pbss) SubConcurrentTraversal(theTrie *Trie, theNode node, path []byte) {
diff --git a/trie/inspect_trie.go b/trie/inspect_trie.go
index 5b09cd11ec..885d3c7454 100644
--- a/trie/inspect_trie.go
+++ b/trie/inspect_trie.go
@@ -15,8 +15,10 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/triedb/database"
"github.com/olekukonko/tablewriter"
"github.com/ethereum/go-ethereum/core/rawdb"
@@ -31,9 +33,15 @@ type Account struct {
CodeHash []byte
}
+type Database interface {
+ database.Database
+ Scheme() string
+ Cap(limit common.StorageSize) error
+ DiskDB() ethdb.Database
+}
type Inspector struct {
trie *Trie // traverse trie
- db *Database
+ db Database
stateRootHash common.Hash
blocknum uint64
root node // root of triedb
@@ -113,7 +121,7 @@ func (nodeStat *NodeStat) ValueNodeCount() string {
}
// NewInspector return a inspector obj
-func NewInspector(tr *Trie, db *Database, stateRootHash common.Hash, blocknum uint64, jobnum uint64) (*Inspector, error) {
+func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uint64, jobnum uint64) (*Inspector, error) {
if tr == nil {
return nil, errors.New("trie is nil")
}
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 9679b49ca7..41e83f6cb6 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -30,7 +30,7 @@ import (
)
func TestEmptyIterator(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
iter := trie.MustNodeIterator(nil)
seen := make(map[string]struct{})
@@ -43,7 +43,7 @@ func TestEmptyIterator(t *testing.T) {
}
func TestIterator(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db)
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -60,7 +60,7 @@ func TestIterator(t *testing.T) {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
found := make(map[string]string)
@@ -86,7 +86,7 @@ func (k *kv) cmp(other *kv) int {
}
func TestIteratorLargeData(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
vals := make(map[string]*kv)
for i := byte(0); i < 255; i++ {
@@ -205,7 +205,7 @@ var testdata2 = []kvs{
}
func TestIteratorSeek(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for _, val := range testdata1 {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
@@ -246,22 +246,22 @@ func checkIteratorOrder(want []kvs, it *Iterator) error {
}
func TestDifferenceIterator(t *testing.T) {
- dba := NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ dba := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
triea := NewEmpty(dba)
for _, val := range testdata1 {
triea.MustUpdate([]byte(val.k), []byte(val.v))
}
rootA, nodesA, _ := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
+ dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
triea, _ = New(TrieID(rootA), dba)
- dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ dbb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trieb := NewEmpty(dbb)
for _, val := range testdata2 {
trieb.MustUpdate([]byte(val.k), []byte(val.v))
}
rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil)
+ dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
trieb, _ = New(TrieID(rootB), dbb)
found := make(map[string]string)
@@ -288,22 +288,22 @@ func TestDifferenceIterator(t *testing.T) {
}
func TestUnionIterator(t *testing.T) {
- dba := NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ dba := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
triea := NewEmpty(dba)
for _, val := range testdata1 {
triea.MustUpdate([]byte(val.k), []byte(val.v))
}
rootA, nodesA, _ := triea.Commit(false)
- dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil)
+ dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA))
triea, _ = New(TrieID(rootA), dba)
- dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ dbb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trieb := NewEmpty(dbb)
for _, val := range testdata2 {
trieb.MustUpdate([]byte(val.k), []byte(val.v))
}
rootB, nodesB, _ := trieb.Commit(false)
- dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil)
+ dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB))
trieb, _ = New(TrieID(rootB), dbb)
di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)})
@@ -341,7 +341,8 @@ func TestUnionIterator(t *testing.T) {
}
func TestIteratorNoDups(t *testing.T) {
- tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
+ tr := NewEmpty(db)
for _, val := range testdata1 {
tr.MustUpdate([]byte(val.k), []byte(val.v))
}
@@ -365,9 +366,9 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) {
tr.MustUpdate([]byte(val.k), []byte(val.v))
}
root, nodes, _ := tr.Commit(false)
- tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly {
- tdb.Commit(root, false)
+ tdb.Commit(root)
}
tr, _ = New(TrieID(root), tdb)
wantNodeCount := checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil)
@@ -481,9 +482,9 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin
break
}
}
- triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly {
- triedb.Commit(root, false)
+ triedb.Commit(root)
}
var (
barNodeBlob []byte
@@ -555,8 +556,8 @@ func testIteratorNodeBlob(t *testing.T, scheme string) {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
- triedb.Commit(root, false)
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ triedb.Commit(root)
var found = make(map[common.Hash][]byte)
trie, _ = New(TrieID(root), triedb)
diff --git a/trie/node.go b/trie/node.go
index d78ed5c569..50cbad7626 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -27,6 +27,8 @@ import (
var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"}
+type TrieNode = node
+type TrieFullNode = fullNode
type node interface {
cache() (hashNode, bool)
encode(w rlp.EncoderBuffer)
diff --git a/trie/proof.go b/trie/proof.go
index a526a53402..fd892fb4be 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -389,7 +389,7 @@ func unset(parent node, child node, key []byte, pos int, removeLeft bool) error
} else {
if bytes.Compare(cld.Key, key[pos:]) > 0 {
// The key of fork shortnode is greater than the
- // path(it belongs to the range), unset the entrie
+ // path(it belongs to the range), unset the entire
// branch. The parent must be a fullnode.
fn := parent.(*fullNode)
fn.Children[key[pos-1]] = nil
diff --git a/trie/proof_test.go b/trie/proof_test.go
index 59ae201cea..5471d0efa6 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -94,7 +94,7 @@ func TestProof(t *testing.T) {
}
func TestOneElementProof(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
updateString(trie, "k", "v")
for i, prover := range makeProvers(trie) {
proof := prover([]byte("k"))
@@ -145,7 +145,7 @@ func TestBadProof(t *testing.T) {
// Tests that missing keys can also be proven. The test explicitly uses a single
// entry trie and checks for missing keys both before and after the single entry.
func TestMissingKeyProof(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
updateString(trie, "k", "v")
for i, key := range []string{"a", "j", "l", "z"} {
@@ -343,7 +343,7 @@ func TestOneElementRangeProof(t *testing.T) {
}
// Test the mini trie with only a single element.
- tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ tinyTrie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
entry := &kv{randBytes(32), randBytes(20), false}
tinyTrie.MustUpdate(entry.k, entry.v)
@@ -414,7 +414,7 @@ func TestAllElementsProof(t *testing.T) {
// TestSingleSideRangeProof tests the range starts from zero.
func TestSingleSideRangeProof(t *testing.T) {
for i := 0; i < 64; i++ {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
var entries []*kv
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -520,7 +520,7 @@ func TestBadRangeProof(t *testing.T) {
// TestGappedRangeProof focuses on the small trie with embedded nodes.
// If the gapped node is embedded in the trie, it should be detected too.
func TestGappedRangeProof(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
var entries []*kv // Sorted entries
for i := byte(0); i < 10; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@@ -592,7 +592,7 @@ func TestSameSideProofs(t *testing.T) {
}
func TestHasRightElement(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
var entries []*kv
for i := 0; i < 4096; i++ {
value := &kv{randBytes(32), randBytes(20), false}
@@ -934,7 +934,7 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) {
}
func randomTrie(n int) (*Trie, map[string]*kv) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
vals := make(map[string]*kv)
for i := byte(0); i < 100; i++ {
value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false}
@@ -953,7 +953,7 @@ func randomTrie(n int) (*Trie, map[string]*kv) {
}
func nonRandomTrie(n int) (*Trie, map[string]*kv) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
vals := make(map[string]*kv)
max := uint64(0xffffffffffffffff)
for i := uint64(0); i < uint64(n); i++ {
@@ -978,7 +978,7 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) {
common.Hex2Bytes("02"),
common.Hex2Bytes("03"),
}
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for i, key := range keys {
trie.MustUpdate(key, vals[i])
}
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index ffe006c1ff..5462f6eb7e 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -21,6 +21,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/triedb/database"
)
// SecureTrie is the old name of StateTrie.
@@ -29,7 +30,7 @@ type SecureTrie = StateTrie
// NewSecure creates a new StateTrie.
// Deprecated: use NewStateTrie.
-func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db *Database) (*SecureTrie, error) {
+func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db database.Database) (*SecureTrie, error) {
id := &ID{
StateRoot: stateRoot,
Owner: owner,
@@ -50,7 +51,7 @@ func NewSecure(stateRoot common.Hash, owner common.Hash, root common.Hash, db *D
// StateTrie is not safe for concurrent use.
type StateTrie struct {
trie Trie
- preimages *preimageStore
+ db database.Database
hashKeyBuf [common.HashLength]byte //nolint:unused
secKeyCache map[string][]byte
secKeyCacheOwner *StateTrie // Pointer to self, replace the key cache on mismatch
@@ -61,7 +62,7 @@ type StateTrie struct {
// If root is the zero hash or the sha3 hash of an empty string, the
// trie is initially empty. Otherwise, New will panic if db is nil
// and returns MissingNodeError if the root node cannot be found.
-func NewStateTrie(id *ID, db *Database) (*StateTrie, error) {
+func NewStateTrie(id *ID, db database.Database) (*StateTrie, error) {
if db == nil {
panic("trie.NewStateTrie called without a database")
}
@@ -69,7 +70,7 @@ func NewStateTrie(id *ID, db *Database) (*StateTrie, error) {
if err != nil {
return nil, err
}
- return &StateTrie{trie: *trie, preimages: db.preimages}, nil
+ return &StateTrie{trie: *trie, db: db}, nil
}
// MustGet returns the value for key stored in the trie.
@@ -210,10 +211,7 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte {
if key, ok := t.getSecKeyCache()[string(shaKey)]; ok {
return key
}
- if t.preimages == nil {
- return nil
- }
- return t.preimages.preimage(common.BytesToHash(shaKey))
+ return t.db.Preimage(common.BytesToHash(shaKey))
}
// Commit collects all dirty nodes in the trie and replaces them with the
@@ -226,13 +224,11 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte {
func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) {
// Write all the pre-images to the actual disk database
if len(t.getSecKeyCache()) > 0 {
- if t.preimages != nil {
- preimages := make(map[common.Hash][]byte)
- for hk, key := range t.secKeyCache {
- preimages[common.BytesToHash([]byte(hk))] = key
- }
- t.preimages.insertPreimage(preimages)
+ preimages := make(map[common.Hash][]byte)
+ for hk, key := range t.secKeyCache {
+ preimages[common.BytesToHash([]byte(hk))] = key
}
+ t.db.InsertPreimage(preimages)
t.secKeyCache = make(map[string][]byte)
}
// Commit the trie and return its modified nodeset.
@@ -249,7 +245,7 @@ func (t *StateTrie) Hash() common.Hash {
func (t *StateTrie) Copy() *StateTrie {
return &StateTrie{
trie: *t.trie.Copy(),
- preimages: t.preimages,
+ db: t.db,
secKeyCache: t.secKeyCache,
}
}
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index af67025006..2f0ef5f046 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -32,14 +32,14 @@ import (
)
func newEmptySecure() *StateTrie {
- trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
return trie
}
// makeTestStateTrie creates a large enough secure trie for testing.
-func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) {
+func makeTestStateTrie() (*testDb, *StateTrie, map[string][]byte) {
// Create an empty trie
- triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ triedb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb)
// Fill it with some arbitrary data
@@ -62,7 +62,7 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) {
}
}
root, nodes, _ := trie.Commit(false)
- if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
+ if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
// Re-create the trie based on the new state
diff --git a/trie/stacktrie_fuzzer_test.go b/trie/stacktrie_fuzzer_test.go
index 1b3f9dbe9c..50b5c4de52 100644
--- a/trie/stacktrie_fuzzer_test.go
+++ b/trie/stacktrie_fuzzer_test.go
@@ -42,10 +42,10 @@ func fuzz(data []byte, debugging bool) {
var (
input = bytes.NewReader(data)
spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbA = NewDatabase(rawdb.NewDatabase(spongeA), nil)
+ dbA = newTestDatabase(rawdb.NewDatabase(spongeA), rawdb.HashScheme)
trieA = NewEmpty(dbA)
spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- dbB = NewDatabase(rawdb.NewDatabase(spongeB), nil)
+ dbB = newTestDatabase(rawdb.NewDatabase(spongeB), rawdb.HashScheme)
options = NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(spongeB, common.Hash{}, path, hash, blob, dbB.Scheme())
@@ -87,10 +87,10 @@ func fuzz(data []byte, debugging bool) {
panic(err)
}
if nodes != nil {
- dbA.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ dbA.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
}
// Flush memdb -> disk (sponge)
- dbA.Commit(rootA, false)
+ dbA.Commit(rootA)
// Stacktrie requires sorted insertion
slices.SortFunc(vals, (*kv).cmp)
diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go
index 909a77062a..3a0e1cb260 100644
--- a/trie/stacktrie_test.go
+++ b/trie/stacktrie_test.go
@@ -223,7 +223,7 @@ func TestStackTrieInsertAndHash(t *testing.T) {
func TestSizeBug(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -238,7 +238,7 @@ func TestSizeBug(t *testing.T) {
func TestEmptyBug(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -264,7 +264,7 @@ func TestEmptyBug(t *testing.T) {
func TestValLength56(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
//leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
//value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3")
@@ -289,7 +289,7 @@ func TestValLength56(t *testing.T) {
// which causes a lot of node-within-node. This case was found via fuzzing.
func TestUpdateSmallNodes(t *testing.T) {
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
kvs := []struct {
K string
V string
@@ -317,7 +317,7 @@ func TestUpdateSmallNodes(t *testing.T) {
func TestUpdateVariableKeys(t *testing.T) {
t.SkipNow()
st := NewStackTrie(nil)
- nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ nt := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
kvs := []struct {
K string
V string
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 585181b48c..7bc68c041f 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -32,7 +32,7 @@ import (
)
// makeTestTrie create a sample test trie to test node-wise reconstruction.
-func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[string][]byte) {
+func makeTestTrie(scheme string) (ethdb.Database, *testDb, *StateTrie, map[string][]byte) {
// Create an empty trie
db := rawdb.NewMemoryDatabase()
triedb := newTestDatabase(db, scheme)
@@ -58,10 +58,10 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str
}
}
root, nodes, _ := trie.Commit(false)
- if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
+ if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil {
panic(fmt.Errorf("failed to commit db %v", err))
}
- if err := triedb.Commit(root, false); err != nil {
+ if err := triedb.Commit(root); err != nil {
panic(err)
}
// Re-create the trie based on the new state
@@ -143,7 +143,7 @@ func TestEmptySync(t *testing.T) {
emptyD, _ := New(TrieID(types.EmptyRootHash), dbD)
for i, trie := range []*Trie{emptyA, emptyB, emptyC, emptyD} {
- sync := NewSync(trie.Hash(), memorydb.New(), nil, []*Database{dbA, dbB, dbC, dbD}[i].Scheme())
+ sync := NewSync(trie.Hash(), memorydb.New(), nil, []*testDb{dbA, dbB, dbC, dbD}[i].Scheme())
if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 {
t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, paths, nodes, codes)
}
@@ -684,11 +684,11 @@ func testSyncOrdering(t *testing.T, scheme string) {
}
}
}
-func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database) {
+func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *testDb) {
syncWithHookWriter(t, root, db, srcDb, nil)
}
-func syncWithHookWriter(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database, hookWriter ethdb.KeyValueWriter) {
+func syncWithHookWriter(t *testing.T, root common.Hash, db ethdb.Database, srcDb *testDb, hookWriter ethdb.KeyValueWriter) {
// Create a destination trie and sync with the scheduler
sched := NewSync(root, db, nil, srcDb.Scheme())
@@ -771,10 +771,10 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
diff[string(key)] = val
}
root, nodes, _ := srcTrie.Commit(false)
- if err := srcDb.Update(root, preRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
+ if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
panic(err)
}
- if err := srcDb.Commit(root, false); err != nil {
+ if err := srcDb.Commit(root); err != nil {
panic(err)
}
preRoot = root
@@ -796,10 +796,10 @@ func testSyncMovingTarget(t *testing.T, scheme string) {
reverted[k] = val
}
root, nodes, _ = srcTrie.Commit(false)
- if err := srcDb.Update(root, preRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
+ if err := srcDb.Update(root, preRoot, trienode.NewWithNodeSet(nodes)); err != nil {
panic(err)
}
- if err := srcDb.Commit(root, false); err != nil {
+ if err := srcDb.Commit(root); err != nil {
panic(err)
}
srcTrie, _ = NewStateTrie(TrieID(root), srcDb)
@@ -854,10 +854,10 @@ func testPivotMove(t *testing.T, scheme string, tiny bool) {
writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateA)
rootA, nodesA, _ := srcTrie.Commit(false)
- if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil {
+ if err := srcTrieDB.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA)); err != nil {
panic(err)
}
- if err := srcTrieDB.Commit(rootA, false); err != nil {
+ if err := srcTrieDB.Commit(rootA); err != nil {
panic(err)
}
// Create a destination trie and sync with the scheduler
@@ -873,10 +873,10 @@ func testPivotMove(t *testing.T, scheme string, tiny bool) {
writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateB)
rootB, nodesB, _ := srcTrie.Commit(false)
- if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil {
+ if err := srcTrieDB.Update(rootB, rootA, trienode.NewWithNodeSet(nodesB)); err != nil {
panic(err)
}
- if err := srcTrieDB.Commit(rootB, false); err != nil {
+ if err := srcTrieDB.Commit(rootB); err != nil {
panic(err)
}
syncWith(t, rootB, destDisk, srcTrieDB)
@@ -891,10 +891,10 @@ func testPivotMove(t *testing.T, scheme string, tiny bool) {
writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateC)
rootC, nodesC, _ := srcTrie.Commit(false)
- if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil {
+ if err := srcTrieDB.Update(rootC, rootB, trienode.NewWithNodeSet(nodesC)); err != nil {
panic(err)
}
- if err := srcTrieDB.Commit(rootC, false); err != nil {
+ if err := srcTrieDB.Commit(rootC); err != nil {
panic(err)
}
syncWith(t, rootC, destDisk, srcTrieDB)
@@ -960,10 +960,10 @@ func testSyncAbort(t *testing.T, scheme string) {
writeFn(key, val, srcTrie, stateA)
rootA, nodesA, _ := srcTrie.Commit(false)
- if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil {
+ if err := srcTrieDB.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA)); err != nil {
panic(err)
}
- if err := srcTrieDB.Commit(rootA, false); err != nil {
+ if err := srcTrieDB.Commit(rootA); err != nil {
panic(err)
}
// Create a destination trie and sync with the scheduler
@@ -977,10 +977,10 @@ func testSyncAbort(t *testing.T, scheme string) {
deleteFn(key, srcTrie, stateB)
rootB, nodesB, _ := srcTrie.Commit(false)
- if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil {
+ if err := srcTrieDB.Update(rootB, rootA, trienode.NewWithNodeSet(nodesB)); err != nil {
panic(err)
}
- if err := srcTrieDB.Commit(rootB, false); err != nil {
+ if err := srcTrieDB.Commit(rootB); err != nil {
panic(err)
}
@@ -1004,10 +1004,10 @@ func testSyncAbort(t *testing.T, scheme string) {
writeFn(key, val, srcTrie, stateC)
rootC, nodesC, _ := srcTrie.Commit(false)
- if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil {
+ if err := srcTrieDB.Update(rootC, rootB, trienode.NewWithNodeSet(nodesC)); err != nil {
panic(err)
}
- if err := srcTrieDB.Commit(rootC, false); err != nil {
+ if err := srcTrieDB.Commit(rootC); err != nil {
panic(err)
}
syncWith(t, rootC, destDisk, srcTrieDB)
diff --git a/trie/tracer_test.go b/trie/tracer_test.go
index acb8c2f6bf..27e42d497a 100644
--- a/trie/tracer_test.go
+++ b/trie/tracer_test.go
@@ -61,7 +61,7 @@ func TestTrieTracer(t *testing.T) {
// Tests if the trie diffs are tracked correctly. Tracer should capture
// all non-leaf dirty nodes, no matter the node is embedded or not.
func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
- db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db)
// Determine all new nodes are tracked
@@ -71,7 +71,7 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) {
insertSet := copySet(trie.tracer.inserts) // copy before commit
deleteSet := copySet(trie.tracer.deletes) // copy before commit
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
seen := setKeys(iterNodes(db, root))
if !compareSet(insertSet, seen) {
@@ -104,7 +104,8 @@ func TestTrieTracerNoop(t *testing.T) {
}
func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
+ trie := NewEmpty(db)
for _, val := range vals {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
@@ -128,7 +129,7 @@ func TestAccessList(t *testing.T) {
func testAccessList(t *testing.T, vals []struct{ k, v string }) {
var (
- db = NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie = NewEmpty(db)
orig = trie.Copy()
)
@@ -137,7 +138,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -152,7 +153,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.MustUpdate([]byte(val.k), randBytes(32))
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -170,7 +171,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.MustUpdate(key, randBytes(32))
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -185,7 +186,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.MustUpdate([]byte(key), nil)
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -200,7 +201,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
trie.MustUpdate([]byte(val.k), nil)
}
root, nodes, _ = trie.Commit(false)
- db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, parent, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, nodes); err != nil {
@@ -211,7 +212,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) {
// Tests origin values won't be tracked in Iterator or Prover
func TestAccessListLeak(t *testing.T) {
var (
- db = NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie = NewEmpty(db)
)
// Create trie from scratch
@@ -219,7 +220,7 @@ func TestAccessListLeak(t *testing.T) {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
var cases = []struct {
op func(tr *Trie)
@@ -262,14 +263,14 @@ func TestAccessListLeak(t *testing.T) {
// in its parent due to the smaller size of the original tree node.
func TestTinyTree(t *testing.T) {
var (
- db = NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ db = newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie = NewEmpty(db)
)
for _, val := range tiny {
trie.MustUpdate([]byte(val.k), randBytes(32))
}
root, set, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(set), nil)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set))
parent := root
trie, _ = New(TrieID(root), db)
@@ -278,7 +279,7 @@ func TestTinyTree(t *testing.T) {
trie.MustUpdate([]byte(val.k), []byte(val.v))
}
root, set, _ = trie.Commit(false)
- db.Update(root, parent, 0, trienode.NewWithNodeSet(set), nil)
+ db.Update(root, parent, trienode.NewWithNodeSet(set))
trie, _ = New(TrieID(root), db)
if err := verifyAccessList(orig, trie, set); err != nil {
@@ -312,7 +313,7 @@ func forNodes(tr *Trie) map[string][]byte {
return nodes
}
-func iterNodes(db *Database, root common.Hash) map[string][]byte {
+func iterNodes(db *testDb, root common.Hash) map[string][]byte {
tr, _ := New(TrieID(root), db)
return forNodes(tr)
}
diff --git a/trie/trie.go b/trie/trie.go
index 47bdb39548..a65bd596f5 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/ethereum/go-ethereum/triedb/database"
)
// Trie is a Merkle Patricia Trie. Use New to create a trie that sits on
@@ -79,7 +80,7 @@ func (t *Trie) Copy() *Trie {
// zero hash or the sha3 hash of an empty string, then trie is initially
// empty, otherwise, the root node must be present in database or returns
// a MissingNodeError if not.
-func New(id *ID, db *Database) (*Trie, error) {
+func New(id *ID, db database.Database) (*Trie, error) {
reader, err := newTrieReader(id.StateRoot, id.Owner, db)
if err != nil {
return nil, err
@@ -100,7 +101,7 @@ func New(id *ID, db *Database) (*Trie, error) {
}
// NewEmpty is a shortcut to create empty tree. It's mostly used in tests.
-func NewEmpty(db *Database) *Trie {
+func NewEmpty(db database.Database) *Trie {
tr, _ := New(TrieID(types.EmptyRootHash), db)
return tr
}
diff --git a/trie/trie_reader.go b/trie/trie_reader.go
index 4215964559..42bc4316fe 100644
--- a/trie/trie_reader.go
+++ b/trie/trie_reader.go
@@ -21,31 +21,19 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie/triestate"
+ "github.com/ethereum/go-ethereum/triedb/database"
)
-// Reader wraps the Node method of a backing trie store.
-type Reader interface {
- // Node retrieves the trie node blob with the provided trie identifier, node path and
- // the corresponding node hash. No error will be returned if the node is not found.
- //
- // When looking up nodes in the account trie, 'owner' is the zero hash. For contract
- // storage trie nodes, 'owner' is the hash of the account address that containing the
- // storage.
- //
- // TODO(rjl493456442): remove the 'hash' parameter, it's redundant in PBSS.
- Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
-}
-
// trieReader is a wrapper of the underlying node reader. It's not safe
// for concurrent usage.
type trieReader struct {
owner common.Hash
- reader Reader
+ reader database.Reader
banned map[string]struct{} // Marker to prevent node from being accessed, for tests
}
// newTrieReader initializes the trie reader with the given node reader.
-func newTrieReader(stateRoot, owner common.Hash, db *Database) (*trieReader, error) {
+func newTrieReader(stateRoot, owner common.Hash, db database.Database) (*trieReader, error) {
if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash {
if stateRoot == (common.Hash{}) {
log.Error("Zero state root hash!")
@@ -85,17 +73,22 @@ func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) {
return blob, nil
}
-// trieLoader implements triestate.TrieLoader for constructing tries.
-type trieLoader struct {
- db *Database
+// MerkleLoader implements triestate.TrieLoader for constructing tries.
+type MerkleLoader struct {
+ db database.Database
+}
+
+// NewMerkleLoader creates the merkle trie loader.
+func NewMerkleLoader(db database.Database) *MerkleLoader {
+ return &MerkleLoader{db: db}
}
// OpenTrie opens the main account trie.
-func (l *trieLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
+func (l *MerkleLoader) OpenTrie(root common.Hash) (triestate.Trie, error) {
return New(TrieID(root), l.db)
}
// OpenStorageTrie opens the storage trie of an account.
-func (l *trieLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
+func (l *MerkleLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) {
return New(StorageTrieID(stateRoot, addrHash, root), l.db)
}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index fcbd552e22..920594fdd2 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -25,6 +25,7 @@ import (
"io"
"math/rand"
"reflect"
+ "sort"
"testing"
"testing/quick"
@@ -46,7 +47,7 @@ func init() {
}
func TestEmptyTrie(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
res := trie.Hash()
exp := types.EmptyRootHash
if res != exp {
@@ -55,7 +56,7 @@ func TestEmptyTrie(t *testing.T) {
}
func TestNull(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
key := make([]byte, 32)
value := []byte("test")
trie.MustUpdate(key, value)
@@ -95,10 +96,10 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) {
updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
if !memonly {
- triedb.Commit(root, false)
+ triedb.Commit(root)
}
trie, _ = New(TrieID(root), triedb)
@@ -167,7 +168,7 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) {
}
func TestInsert(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy")
@@ -179,7 +180,7 @@ func TestInsert(t *testing.T) {
t.Errorf("case 1: exp %x got %x", exp, root)
}
- trie = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie = NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab")
@@ -190,7 +191,7 @@ func TestInsert(t *testing.T) {
}
func TestGet(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db)
updateString(trie, "doe", "reindeer")
updateString(trie, "dog", "puppy")
@@ -209,13 +210,14 @@ func TestGet(t *testing.T) {
return
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
trie, _ = New(TrieID(root), db)
}
}
func TestDelete(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
+ trie := NewEmpty(db)
vals := []struct{ k, v string }{
{"do", "verb"},
{"ether", "wookiedoo"},
@@ -242,7 +244,7 @@ func TestDelete(t *testing.T) {
}
func TestEmptyValues(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -266,7 +268,7 @@ func TestEmptyValues(t *testing.T) {
}
func TestReplication(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(db)
vals := []struct{ k, v string }{
{"do", "verb"},
@@ -281,7 +283,7 @@ func TestReplication(t *testing.T) {
updateString(trie, val.k, val.v)
}
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// create a new trie on top of the database and check that lookups work.
trie2, err := New(TrieID(root), db)
@@ -300,7 +302,7 @@ func TestReplication(t *testing.T) {
// recreate the trie after commit
if nodes != nil {
- db.Update(hash, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
}
trie2, err = New(TrieID(hash), db)
if err != nil {
@@ -327,13 +329,13 @@ func TestReplication(t *testing.T) {
}
func TestLargeValue(t *testing.T) {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
trie.MustUpdate([]byte("key1"), []byte{99, 99, 99, 99})
trie.MustUpdate([]byte("key2"), bytes.Repeat([]byte{1}, 32))
trie.Hash()
}
-// TestRandomCases tests som cases that were found via random fuzzing
+// TestRandomCases tests some cases that were found via random fuzzing
func TestRandomCases(t *testing.T) {
var rt = []randTestStep{
{op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 0
@@ -531,7 +533,7 @@ func runRandTest(rt randTest) error {
case opCommit:
root, nodes, _ := tr.Commit(true)
if nodes != nil {
- triedb.Update(root, origin, 0, trienode.NewWithNodeSet(nodes), nil)
+ triedb.Update(root, origin, trienode.NewWithNodeSet(nodes))
}
newtr, err := New(TrieID(root), triedb)
if err != nil {
@@ -554,7 +556,7 @@ func runRandTest(rt randTest) error {
checktr.MustUpdate(it.Key, it.Value)
}
if tr.Hash() != checktr.Hash() {
- rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash")
+ rt[i].err = errors.New("hash mismatch in opItercheckhash")
}
case opNodeDiff:
var (
@@ -592,19 +594,19 @@ func runRandTest(rt randTest) error {
}
}
if len(insertExp) != len(tr.tracer.inserts) {
- rt[i].err = fmt.Errorf("insert set mismatch")
+ rt[i].err = errors.New("insert set mismatch")
}
if len(deleteExp) != len(tr.tracer.deletes) {
- rt[i].err = fmt.Errorf("delete set mismatch")
+ rt[i].err = errors.New("delete set mismatch")
}
for insert := range tr.tracer.inserts {
if _, present := insertExp[insert]; !present {
- rt[i].err = fmt.Errorf("missing inserted node")
+ rt[i].err = errors.New("missing inserted node")
}
}
for del := range tr.tracer.deletes {
if _, present := deleteExp[del]; !present {
- rt[i].err = fmt.Errorf("missing deleted node")
+ rt[i].err = errors.New("missing deleted node")
}
}
}
@@ -632,7 +634,7 @@ func BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) }
const benchElemCount = 20000
func benchGet(b *testing.B) {
- triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
+ triedb := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme)
trie := NewEmpty(triedb)
k := make([]byte, 32)
for i := 0; i < benchElemCount; i++ {
@@ -651,7 +653,7 @@ func benchGet(b *testing.B) {
}
func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
k := make([]byte, 32)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
@@ -683,7 +685,7 @@ func BenchmarkHash(b *testing.B) {
// entries, then adding N more.
addresses, accounts := makeAccounts(2 * b.N)
// Insert the accounts into the trie and hash it
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
i := 0
for ; i < len(addresses)/2; i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
@@ -714,7 +716,7 @@ func BenchmarkCommitAfterHash(b *testing.B) {
func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
// Make the random benchmark deterministic
addresses, accounts := makeAccounts(b.N)
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -728,7 +730,7 @@ func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) {
func TestTinyTrie(t *testing.T) {
// Create a realistic account trie to hash
_, accounts := makeAccounts(5)
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root {
t.Errorf("1: got %x, exp %x", root, exp)
@@ -741,7 +743,7 @@ func TestTinyTrie(t *testing.T) {
if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root {
t.Errorf("3: got %x, exp %x", root, exp)
}
- checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ checktr := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
it := NewIterator(trie.MustNodeIterator(nil))
for it.Next() {
checktr.MustUpdate(it.Key, it.Value)
@@ -754,7 +756,7 @@ func TestTinyTrie(t *testing.T) {
func TestCommitAfterHash(t *testing.T) {
// Create a realistic account trie to hash
addresses, accounts := makeAccounts(1000)
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -808,6 +810,8 @@ type spongeDb struct {
sponge hash.Hash
id string
journal []string
+ keys []string
+ values map[string]string
}
func (s *spongeDb) Has(key []byte) (bool, error) { panic("implement me") }
@@ -831,12 +835,27 @@ func (s *spongeDb) Put(key []byte, value []byte) error {
valbrief = valbrief[:8]
}
s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, keybrief, len(value), valbrief))
- s.sponge.Write(key)
- s.sponge.Write(value)
+
+ if s.values == nil {
+ s.sponge.Write(key)
+ s.sponge.Write(value)
+ } else {
+ s.keys = append(s.keys, string(key))
+ s.values[string(key)] = string(value)
+ }
return nil
}
func (s *spongeDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { panic("implement me") }
+func (s *spongeDb) Flush() {
+ // Bottom-up, the longest path first
+ sort.Sort(sort.Reverse(sort.StringSlice(s.keys)))
+ for _, key := range s.keys {
+ s.sponge.Write([]byte(key))
+ s.sponge.Write([]byte(s.values[key]))
+ }
+}
+
// spongeBatch is a dummy batch which immediately writes to the underlying spongedb
type spongeBatch struct {
db *spongeDb
@@ -861,14 +880,14 @@ func TestCommitSequence(t *testing.T) {
count int
expWriteSeqHash []byte
}{
- {20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066")},
- {200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e")},
- {2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7")},
+ {20, common.FromHex("330b0afae2853d96b9f015791fbe0fb7f239bf65f335f16dfc04b76c7536276d")},
+ {200, common.FromHex("5162b3735c06b5d606b043a3ee8adbdbbb408543f4966bca9dcc63da82684eeb")},
+ {2000, common.FromHex("4574cd8e6b17f3fe8ad89140d1d0bf4f1bd7a87a8ac3fb623b33550544c77635")},
} {
addresses, accounts := makeAccounts(tc.count)
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := NewDatabase(rawdb.NewDatabase(s), nil)
+ db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
trie := NewEmpty(db)
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
@@ -876,9 +895,9 @@ func TestCommitSequence(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
- db.Commit(root, false)
+ db.Commit(root)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
}
@@ -892,14 +911,14 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
count int
expWriteSeqHash []byte
}{
- {20, common.FromHex("8e4a01548551d139fa9e833ebc4e66fc1ba40a4b9b7259d80db32cff7b64ebbc")},
- {200, common.FromHex("6869b4e7b95f3097a19ddb30ff735f922b915314047e041614df06958fc50554")},
- {2000, common.FromHex("444200e6f4e2df49f77752f629a96ccf7445d4698c164f962bbd85a0526ef424")},
+ {20, common.FromHex("8016650c7a50cf88485fd06cde52d634a89711051107f00d21fae98234f2f13d")},
+ {200, common.FromHex("dde92ca9812e068e6982d04b40846dc65a61a9fd4996fc0f55f2fde172a8e13c")},
+ {2000, common.FromHex("ab553a7f9aff82e3929c382908e30ef7dd17a332933e92ba3fe873fc661ef382")},
} {
prng := rand.New(rand.NewSource(int64(i)))
// This spongeDb is used to check the sequence of disk-db-writes
s := &spongeDb{sponge: sha3.NewLegacyKeccak256()}
- db := NewDatabase(rawdb.NewDatabase(s), nil)
+ db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
trie := NewEmpty(db)
// Fill the trie with elements
for i := 0; i < tc.count; i++ {
@@ -917,9 +936,9 @@ func TestCommitSequenceRandomBlobs(t *testing.T) {
}
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
// Flush memdb -> disk (sponge)
- db.Commit(root, false)
+ db.Commit(root)
if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
}
@@ -930,17 +949,26 @@ func TestCommitSequenceStackTrie(t *testing.T) {
for count := 1; count < 200; count++ {
prng := rand.New(rand.NewSource(int64(count)))
// This spongeDb is used to check the sequence of disk-db-writes
- s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
- db := NewDatabase(rawdb.NewDatabase(s), nil)
+ s := &spongeDb{
+ sponge: sha3.NewLegacyKeccak256(),
+ id: "a",
+ values: make(map[string]string),
+ }
+ db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
trie := NewEmpty(db)
- // Another sponge is used for the stacktrie commits
- stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
+ // Another sponge is used for the stacktrie commits
+ stackTrieSponge := &spongeDb{
+ sponge: sha3.NewLegacyKeccak256(),
+ id: "b",
+ values: make(map[string]string),
+ }
options := NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
})
stTrie := NewStackTrie(options)
+
// Fill the trie with elements
for i := 0; i < count; i++ {
// For the stack trie, we need to do inserts in proper order
@@ -960,13 +988,16 @@ func TestCommitSequenceStackTrie(t *testing.T) {
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
- db.Commit(root, false)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Commit(root)
+ s.Flush()
+
// And flush stacktrie -> disk
stRoot := stTrie.Commit()
if stRoot != root {
t.Fatalf("root wrong, got %x exp %x", stRoot, root)
}
+ stackTrieSponge.Flush()
if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
// Show the journal
t.Logf("Expected:")
@@ -989,34 +1020,47 @@ func TestCommitSequenceStackTrie(t *testing.T) {
// that even a small trie which contains a leaf will have an extension making it
// not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do.
func TestCommitSequenceSmallRoot(t *testing.T) {
- s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"}
- db := NewDatabase(rawdb.NewDatabase(s), nil)
+ s := &spongeDb{
+ sponge: sha3.NewLegacyKeccak256(),
+ id: "a",
+ values: make(map[string]string),
+ }
+ db := newTestDatabase(rawdb.NewDatabase(s), rawdb.HashScheme)
trie := NewEmpty(db)
- // Another sponge is used for the stacktrie commits
- stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"}
+ // Another sponge is used for the stacktrie commits
+ stackTrieSponge := &spongeDb{
+ sponge: sha3.NewLegacyKeccak256(),
+ id: "b",
+ values: make(map[string]string),
+ }
options := NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(stackTrieSponge, common.Hash{}, path, hash, blob, db.Scheme())
})
stTrie := NewStackTrie(options)
+
// Add a single small-element to the trie(s)
key := make([]byte, 5)
key[0] = 1
trie.Update(key, []byte{0x1})
stTrie.Update(key, []byte{0x1})
+
// Flush trie -> database
root, nodes, _ := trie.Commit(false)
// Flush memdb -> disk (sponge)
- db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
- db.Commit(root, false)
+ db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes))
+ db.Commit(root)
+
// And flush stacktrie -> disk
stRoot := stTrie.Commit()
if stRoot != root {
t.Fatalf("root wrong, got %x exp %x", stRoot, root)
}
-
t.Logf("root: %x\n", stRoot)
+
+ s.Flush()
+ stackTrieSponge.Flush()
if got, exp := stackTrieSponge.sponge.Sum(nil), s.sponge.Sum(nil); !bytes.Equal(got, exp) {
t.Fatalf("test, disk write sequence wrong:\ngot %x exp %x\n", got, exp)
}
@@ -1067,7 +1111,7 @@ func BenchmarkHashFixedSize(b *testing.B) {
func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs()
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -1118,7 +1162,7 @@ func BenchmarkCommitAfterHashFixedSize(b *testing.B) {
func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs()
- trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil))
+ trie := NewEmpty(newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme))
for i := 0; i < len(addresses); i++ {
trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
}
@@ -1129,60 +1173,6 @@ func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accou
b.StopTimer()
}
-func BenchmarkDerefRootFixedSize(b *testing.B) {
- b.Run("10", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(20)
- for i := 0; i < b.N; i++ {
- benchmarkDerefRootFixedSize(b, acc, add)
- }
- })
- b.Run("100", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(100)
- for i := 0; i < b.N; i++ {
- benchmarkDerefRootFixedSize(b, acc, add)
- }
- })
-
- b.Run("1K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(1000)
- for i := 0; i < b.N; i++ {
- benchmarkDerefRootFixedSize(b, acc, add)
- }
- })
- b.Run("10K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(10000)
- for i := 0; i < b.N; i++ {
- benchmarkDerefRootFixedSize(b, acc, add)
- }
- })
- b.Run("100K", func(b *testing.B) {
- b.StopTimer()
- acc, add := makeAccounts(100000)
- for i := 0; i < b.N; i++ {
- benchmarkDerefRootFixedSize(b, acc, add)
- }
- })
-}
-
-func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
- b.ReportAllocs()
- triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil)
- trie := NewEmpty(triedb)
- for i := 0; i < len(addresses); i++ {
- trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i])
- }
- h := trie.Hash()
- root, nodes, _ := trie.Commit(false)
- triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
- b.StartTimer()
- triedb.Dereference(h)
- b.StopTimer()
-}
-
func getString(trie *Trie, k string) []byte {
return trie.MustGet([]byte(k))
}
diff --git a/trie/verkle.go b/trie/verkle.go
index c21a796a0f..01d813d9ec 100644
--- a/trie/verkle.go
+++ b/trie/verkle.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/utils"
+ "github.com/ethereum/go-ethereum/triedb/database"
"github.com/gballet/go-verkle"
"github.com/holiman/uint256"
)
@@ -39,13 +40,12 @@ var (
// interface so that Verkle trees can be reused verbatim.
type VerkleTrie struct {
root verkle.VerkleNode
- db *Database
cache *utils.PointCache
reader *trieReader
}
// NewVerkleTrie constructs a verkle tree based on the specified root hash.
-func NewVerkleTrie(root common.Hash, db *Database, cache *utils.PointCache) (*VerkleTrie, error) {
+func NewVerkleTrie(root common.Hash, db database.Database, cache *utils.PointCache) (*VerkleTrie, error) {
reader, err := newTrieReader(root, common.Hash{}, db)
if err != nil {
return nil, err
@@ -64,7 +64,6 @@ func NewVerkleTrie(root common.Hash, db *Database, cache *utils.PointCache) (*Ve
}
return &VerkleTrie{
root: node,
- db: db,
cache: cache,
reader: reader,
}, nil
@@ -261,7 +260,6 @@ func (t *VerkleTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
func (t *VerkleTrie) Copy() *VerkleTrie {
return &VerkleTrie{
root: t.root.Copy(),
- db: t.db,
cache: t.cache,
reader: t.reader,
}
diff --git a/trie/verkle_test.go b/trie/verkle_test.go
index 1c65b673aa..0cbe28bf01 100644
--- a/trie/verkle_test.go
+++ b/trie/verkle_test.go
@@ -24,7 +24,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
)
@@ -57,12 +56,7 @@ var (
)
func TestVerkleTreeReadWrite(t *testing.T) {
- db := NewDatabase(rawdb.NewMemoryDatabase(), &Config{
- IsVerkle: true,
- PathDB: pathdb.Defaults,
- })
- defer db.Close()
-
+ db := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme)
tr, _ := NewVerkleTrie(types.EmptyVerkleHash, db, utils.NewPointCache(100))
for addr, acct := range accounts {
diff --git a/trie/database.go b/triedb/database.go
similarity index 88%
rename from trie/database.go
rename to triedb/database.go
index aaa20257a5..11e7cc33ee 100644
--- a/trie/database.go
+++ b/triedb/database.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package trie
+package triedb
import (
"errors"
@@ -24,10 +24,12 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/trie/triedb/hashdb"
- "github.com/ethereum/go-ethereum/trie/triedb/pathdb"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/trie/triestate"
+ "github.com/ethereum/go-ethereum/triedb/database"
+ "github.com/ethereum/go-ethereum/triedb/hashdb"
+ "github.com/ethereum/go-ethereum/triedb/pathdb"
)
// Config defines all necessary options for database.
@@ -91,6 +93,12 @@ type Database struct {
// the legacy hash-based scheme is used by default.
func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
// Sanitize the config and use the default one if it's not specified.
+ var triediskdb ethdb.Database
+ if diskdb != nil && diskdb.StateStore() != nil {
+ triediskdb = diskdb.StateStore()
+ } else {
+ triediskdb = diskdb
+ }
dbScheme := rawdb.ReadStateScheme(diskdb)
if config == nil {
if dbScheme == rawdb.PathScheme {
@@ -110,11 +118,11 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
}
var preimages *preimageStore
if config.Preimages {
- preimages = newPreimageStore(diskdb)
+ preimages = newPreimageStore(triediskdb)
}
db := &Database{
config: config,
- diskdb: diskdb,
+ diskdb: triediskdb,
preimages: preimages,
}
/*
@@ -123,25 +131,32 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
* 3. Last, use the default scheme, namely hash scheme
*/
if config.HashDB != nil {
- if rawdb.ReadStateScheme(diskdb) == rawdb.PathScheme {
+ if rawdb.ReadStateScheme(triediskdb) == rawdb.PathScheme {
log.Warn("incompatible state scheme", "old", rawdb.PathScheme, "new", rawdb.HashScheme)
}
- db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{})
+ db.backend = hashdb.New(triediskdb, config.HashDB, trie.MerkleResolver{})
} else if config.PathDB != nil {
- if rawdb.ReadStateScheme(diskdb) == rawdb.HashScheme {
+ if rawdb.ReadStateScheme(triediskdb) == rawdb.HashScheme {
log.Warn("incompatible state scheme", "old", rawdb.HashScheme, "new", rawdb.PathScheme)
}
- db.backend = pathdb.New(diskdb, config.PathDB)
+ db.backend = pathdb.New(triediskdb, config.PathDB)
} else if strings.Compare(dbScheme, rawdb.PathScheme) == 0 {
if config.PathDB == nil {
config.PathDB = pathdb.Defaults
}
- db.backend = pathdb.New(diskdb, config.PathDB)
+ db.backend = pathdb.New(triediskdb, config.PathDB)
} else {
+ var resolver hashdb.ChildResolver
+ if config.IsVerkle {
+ // TODO define verkle resolver
+ log.Crit("Verkle node resolver is not defined")
+ } else {
+ resolver = trie.MerkleResolver{}
+ }
if config.HashDB == nil {
config.HashDB = hashdb.Defaults
}
- db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{})
+ db.backend = hashdb.New(triediskdb, config.HashDB, resolver)
}
return db
}
@@ -150,9 +165,13 @@ func (db *Database) Config() *Config {
return db.config
}
+func (db *Database) DiskDB() ethdb.Database {
+ return db.diskdb
+}
+
// Reader returns a reader for accessing all trie nodes with provided state root.
// An error will be returned if the requested state is not available.
-func (db *Database) Reader(blockRoot common.Hash) (Reader, error) {
+func (db *Database) Reader(blockRoot common.Hash) (database.Reader, error) {
switch b := db.backend.(type) {
case *hashdb.Database:
return b.Reader(blockRoot)
@@ -227,8 +246,7 @@ func (db *Database) WritePreimages() {
}
}
-// Preimage retrieves a cached trie node pre-image from memory. If it cannot be
-// found cached, the method queries the persistent database for the content.
+// Preimage retrieves a cached trie node pre-image from preimage store.
func (db *Database) Preimage(hash common.Hash) []byte {
if db.preimages == nil {
return nil
@@ -236,6 +254,14 @@ func (db *Database) Preimage(hash common.Hash) []byte {
return db.preimages.preimage(hash)
}
+// InsertPreimage writes pre-images of trie node to the preimage store.
+func (db *Database) InsertPreimage(preimages map[common.Hash][]byte) {
+ if db.preimages == nil {
+ return
+ }
+ db.preimages.insertPreimage(preimages)
+}
+
// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold. The held pre-images accumulated
// up to this point will be flushed in case the size exceeds the threshold.
@@ -286,7 +312,14 @@ func (db *Database) Recover(target common.Hash) error {
if !ok {
return errors.New("not supported")
}
- return pdb.Recover(target, &trieLoader{db: db})
+ var loader triestate.TrieLoader
+ if db.config.IsVerkle {
+ // TODO define verkle loader
+ log.Crit("Verkle loader is not defined")
+ } else {
+ loader = trie.NewMerkleLoader(db)
+ }
+ return pdb.Recover(target, loader)
}
// Recoverable returns the indicator if the specified state is enabled to be
diff --git a/triedb/database/database.go b/triedb/database/database.go
new file mode 100644
index 0000000000..18a8f454e2
--- /dev/null
+++ b/triedb/database/database.go
@@ -0,0 +1,48 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package database
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// Reader wraps the Node method of a backing trie reader.
+type Reader interface {
+ // Node retrieves the trie node blob with the provided trie identifier,
+ // node path and the corresponding node hash. No error will be returned
+ // if the node is not found.
+ Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error)
+}
+
+// PreimageStore wraps the methods of a backing store for reading and writing
+// trie node preimages.
+type PreimageStore interface {
+ // Preimage retrieves the preimage of the specified hash.
+ Preimage(hash common.Hash) []byte
+
+ // InsertPreimage commits a set of preimages along with their hashes.
+ InsertPreimage(preimages map[common.Hash][]byte)
+}
+
+// Database wraps the methods of a backing trie store.
+type Database interface {
+ PreimageStore
+
+ // Reader returns a node reader associated with the specific state.
+ // An error will be returned if the specified state is not available.
+ Reader(stateRoot common.Hash) (Reader, error)
+}
diff --git a/trie/triedb/hashdb/database.go b/triedb/hashdb/database.go
similarity index 100%
rename from trie/triedb/hashdb/database.go
rename to triedb/hashdb/database.go
diff --git a/trie/triedb/pathdb/asyncnodebuffer.go b/triedb/pathdb/asyncnodebuffer.go
similarity index 100%
rename from trie/triedb/pathdb/asyncnodebuffer.go
rename to triedb/pathdb/asyncnodebuffer.go
diff --git a/trie/triedb/pathdb/database.go b/triedb/pathdb/database.go
similarity index 100%
rename from trie/triedb/pathdb/database.go
rename to triedb/pathdb/database.go
diff --git a/trie/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go
similarity index 100%
rename from trie/triedb/pathdb/database_test.go
rename to triedb/pathdb/database_test.go
diff --git a/trie/triedb/pathdb/difflayer.go b/triedb/pathdb/difflayer.go
similarity index 100%
rename from trie/triedb/pathdb/difflayer.go
rename to triedb/pathdb/difflayer.go
diff --git a/trie/triedb/pathdb/difflayer_test.go b/triedb/pathdb/difflayer_test.go
similarity index 100%
rename from trie/triedb/pathdb/difflayer_test.go
rename to triedb/pathdb/difflayer_test.go
diff --git a/trie/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go
similarity index 100%
rename from trie/triedb/pathdb/disklayer.go
rename to triedb/pathdb/disklayer.go
diff --git a/trie/triedb/pathdb/errors.go b/triedb/pathdb/errors.go
similarity index 100%
rename from trie/triedb/pathdb/errors.go
rename to triedb/pathdb/errors.go
diff --git a/trie/triedb/pathdb/history.go b/triedb/pathdb/history.go
similarity index 99%
rename from trie/triedb/pathdb/history.go
rename to triedb/pathdb/history.go
index 6e3f3faaed..051e122bec 100644
--- a/trie/triedb/pathdb/history.go
+++ b/triedb/pathdb/history.go
@@ -215,7 +215,7 @@ func (m *meta) encode() []byte {
// decode unpacks the meta object from byte stream.
func (m *meta) decode(blob []byte) error {
if len(blob) < 1 {
- return fmt.Errorf("no version tag")
+ return errors.New("no version tag")
}
switch blob[0] {
case stateHistoryVersion:
diff --git a/trie/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go
similarity index 100%
rename from trie/triedb/pathdb/history_test.go
rename to triedb/pathdb/history_test.go
diff --git a/trie/triedb/pathdb/journal.go b/triedb/pathdb/journal.go
similarity index 100%
rename from trie/triedb/pathdb/journal.go
rename to triedb/pathdb/journal.go
diff --git a/trie/triedb/pathdb/layertree.go b/triedb/pathdb/layertree.go
similarity index 100%
rename from trie/triedb/pathdb/layertree.go
rename to triedb/pathdb/layertree.go
diff --git a/trie/triedb/pathdb/metrics.go b/triedb/pathdb/metrics.go
similarity index 100%
rename from trie/triedb/pathdb/metrics.go
rename to triedb/pathdb/metrics.go
diff --git a/trie/triedb/pathdb/nodebuffer.go b/triedb/pathdb/nodebuffer.go
similarity index 100%
rename from trie/triedb/pathdb/nodebuffer.go
rename to triedb/pathdb/nodebuffer.go
diff --git a/trie/triedb/pathdb/testutils.go b/triedb/pathdb/testutils.go
similarity index 100%
rename from trie/triedb/pathdb/testutils.go
rename to triedb/pathdb/testutils.go
diff --git a/trie/preimages.go b/triedb/preimages.go
similarity index 99%
rename from trie/preimages.go
rename to triedb/preimages.go
index 66f34117c1..a5384910f7 100644
--- a/trie/preimages.go
+++ b/triedb/preimages.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package trie
+package triedb
import (
"sync"