diff --git a/cmd/bera-geth/chaincmd.go b/cmd/bera-geth/chaincmd.go
index 7b9082dc9eff..9988d13e4013 100644
--- a/cmd/bera-geth/chaincmd.go
+++ b/cmd/bera-geth/chaincmd.go
@@ -724,7 +724,7 @@ func downloadEra(ctx *cli.Context) error {
case ctx.IsSet(utils.BepoliaFlag.Name):
network = "bepolia"
default:
- return fmt.Errorf("unsupported network, no known era1 checksums")
+ return errors.New("unsupported network, no known era1 checksums")
}
}
diff --git a/cmd/bera-geth/config.go b/cmd/bera-geth/config.go
index d7c354ff9f23..96bd715e8899 100644
--- a/cmd/bera-geth/config.go
+++ b/cmd/bera-geth/config.go
@@ -262,14 +262,16 @@ func makeFullNode(ctx *cli.Context) *node.Node {
if cfg.Ethstats.URL != "" {
utils.RegisterEthStatsService(stack, backend, cfg.Ethstats.URL)
}
- // Configure full-sync tester service if requested
+ // Configure synchronization override service
+ var synctarget common.Hash
if ctx.IsSet(utils.SyncTargetFlag.Name) {
hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name))
if len(hex) != common.HashLength {
utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength)
}
- utils.RegisterFullSyncTester(stack, eth, common.BytesToHash(hex), ctx.Bool(utils.ExitWhenSyncedFlag.Name))
+ synctarget = common.BytesToHash(hex)
}
+ utils.RegisterSyncOverrideService(stack, eth, synctarget, ctx.Bool(utils.ExitWhenSyncedFlag.Name))
if ctx.IsSet(utils.DeveloperFlag.Name) {
// Start dev mode.
diff --git a/cmd/devp2p/internal/ethtest/conn.go b/cmd/devp2p/internal/ethtest/conn.go
index 4a7a2c76d8fb..5182d71ce19c 100644
--- a/cmd/devp2p/internal/ethtest/conn.go
+++ b/cmd/devp2p/internal/ethtest/conn.go
@@ -129,7 +129,7 @@ func (c *Conn) Write(proto Proto, code uint64, msg any) error {
return err
}
-var errDisc error = fmt.Errorf("disconnect")
+var errDisc error = errors.New("disconnect")
// ReadEth reads an Eth sub-protocol wire message.
func (c *Conn) ReadEth() (any, error) {
diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go
index b5a346c07416..47d00761f325 100644
--- a/cmd/devp2p/internal/ethtest/suite.go
+++ b/cmd/devp2p/internal/ethtest/suite.go
@@ -19,6 +19,7 @@ package ethtest
import (
"context"
"crypto/rand"
+ "errors"
"fmt"
"reflect"
"sync"
@@ -1092,7 +1093,7 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
return
}
if !readUntilDisconnect(conn) {
- errc <- fmt.Errorf("expected bad peer to be disconnected")
+ errc <- errors.New("expected bad peer to be disconnected")
return
}
stage3.Done()
@@ -1139,7 +1140,7 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
}
if req.GetPooledTransactionsRequest[0] != tx.Hash() {
- errc <- fmt.Errorf("requested unknown tx hash")
+ errc <- errors.New("requested unknown tx hash")
return
}
@@ -1149,7 +1150,7 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
return
}
if readUntilDisconnect(conn) {
- errc <- fmt.Errorf("unexpected disconnect")
+ errc <- errors.New("unexpected disconnect")
return
}
close(errc)
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 684816e4904a..c31ad4b4879f 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -49,10 +49,10 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/filters"
"github.com/ethereum/go-ethereum/eth/gasprice"
+ "github.com/ethereum/go-ethereum/eth/syncer"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/remotedb"
@@ -2008,10 +2008,14 @@ func RegisterFilterAPI(stack *node.Node, backend ethapi.Backend, ethcfg *ethconf
return filterSystem
}
-// RegisterFullSyncTester adds the full-sync tester service into node.
-func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.Hash, exitWhenSynced bool) {
- catalyst.RegisterFullSyncTester(stack, eth, target, exitWhenSynced)
- log.Info("Registered full-sync tester", "hash", target, "exitWhenSynced", exitWhenSynced)
+// RegisterSyncOverrideService adds the synchronization override service into node.
+func RegisterSyncOverrideService(stack *node.Node, eth *eth.Ethereum, target common.Hash, exitWhenSynced bool) {
+ if target != (common.Hash{}) {
+ log.Info("Registered sync override service", "hash", target, "exitWhenSynced", exitWhenSynced)
+ } else {
+ log.Info("Registered sync override service")
+ }
+ syncer.Register(stack, eth, target, exitWhenSynced)
}
// SetupMetrics configures the metrics system.
diff --git a/cmd/workload/testsuite.go b/cmd/workload/testsuite.go
index dcb7b67ab1c0..83fb5720dffd 100644
--- a/cmd/workload/testsuite.go
+++ b/cmd/workload/testsuite.go
@@ -18,7 +18,7 @@ package main
import (
"embed"
- "fmt"
+ "errors"
"io/fs"
"os"
@@ -107,7 +107,7 @@ type testConfig struct {
traceTestFile string
}
-var errPrunedHistory = fmt.Errorf("attempt to access pruned history")
+var errPrunedHistory = errors.New("attempt to access pruned history")
// validateHistoryPruneErr checks whether the given error is caused by access
// to history before the pruning threshold block (it is an rpc.Error with code 4444).
@@ -119,7 +119,7 @@ func validateHistoryPruneErr(err error, blockNum uint64, historyPruneBlock *uint
if err != nil {
if rpcErr, ok := err.(rpc.Error); ok && rpcErr.ErrorCode() == 4444 {
if historyPruneBlock != nil && blockNum > *historyPruneBlock {
- return fmt.Errorf("pruned history error returned after pruning threshold")
+ return errors.New("pruned history error returned after pruning threshold")
}
return errPrunedHistory
}
diff --git a/core/blockchain.go b/core/blockchain.go
index d52990ec5adc..0b92a94b6c6a 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -682,7 +682,7 @@ func (bc *BlockChain) initializeHistoryPruning(latest uint64) error {
predefinedPoint := history.PrunePoints[bc.genesisBlock.Hash()]
if predefinedPoint == nil || freezerTail != predefinedPoint.BlockNumber {
log.Error("Chain history database is pruned with unknown configuration", "tail", freezerTail)
- return fmt.Errorf("unexpected database tail")
+ return errors.New("unexpected database tail")
}
bc.historyPrunePoint.Store(predefinedPoint)
return nil
@@ -695,15 +695,15 @@ func (bc *BlockChain) initializeHistoryPruning(latest uint64) error {
// action to happen. So just tell them how to do it.
log.Error(fmt.Sprintf("Chain history mode is configured as %q, but database is not pruned.", bc.cfg.ChainHistoryMode.String()))
log.Error(fmt.Sprintf("Run 'geth prune-history' to prune pre-merge history."))
- return fmt.Errorf("history pruning requested via configuration")
+ return errors.New("history pruning requested via configuration")
}
predefinedPoint := history.PrunePoints[bc.genesisBlock.Hash()]
if predefinedPoint == nil {
log.Error("Chain history pruning is not supported for this network", "genesis", bc.genesisBlock.Hash())
- return fmt.Errorf("history pruning requested for unknown network")
+ return errors.New("history pruning requested for unknown network")
} else if freezerTail > 0 && freezerTail != predefinedPoint.BlockNumber {
log.Error("Chain history database is pruned to unknown block", "tail", freezerTail)
- return fmt.Errorf("unexpected database tail")
+ return errors.New("unexpected database tail")
}
bc.historyPrunePoint.Store(predefinedPoint)
return nil
diff --git a/core/rawdb/chain_freezer.go b/core/rawdb/chain_freezer.go
index 4834354b2251..c12f2ab8feab 100644
--- a/core/rawdb/chain_freezer.go
+++ b/core/rawdb/chain_freezer.go
@@ -104,7 +104,7 @@ func (f *chainFreezer) Close() error {
func (f *chainFreezer) readHeadNumber(db ethdb.KeyValueReader) uint64 {
hash := ReadHeadBlockHash(db)
if hash == (common.Hash{}) {
- log.Error("Head block is not reachable")
+ log.Warn("Head block is not reachable")
return 0
}
number, ok := ReadHeaderNumber(db, hash)
diff --git a/core/state/access_list.go b/core/state/access_list.go
index a58c2b20ea96..e3f173886482 100644
--- a/core/state/access_list.go
+++ b/core/state/access_list.go
@@ -145,10 +145,7 @@ func (al *accessList) Equal(other *accessList) bool {
// PrettyPrint prints the contents of the access list in a human-readable form
func (al *accessList) PrettyPrint() string {
out := new(strings.Builder)
- var sortedAddrs []common.Address
- for addr := range al.addresses {
- sortedAddrs = append(sortedAddrs, addr)
- }
+ sortedAddrs := slices.Collect(maps.Keys(al.addresses))
slices.SortFunc(sortedAddrs, common.Address.Cmp)
for _, addr := range sortedAddrs {
idx := al.addresses[addr]
diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go
index e4b396b99037..004dd5298acb 100644
--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -350,7 +350,7 @@ func iterateJournal(db ethdb.KeyValueReader, callback journalCallback) error {
}
if len(destructs) > 0 {
log.Warn("Incompatible legacy journal detected", "version", journalV0)
- return fmt.Errorf("incompatible legacy journal detected")
+ return errors.New("incompatible legacy journal detected")
}
}
if err := r.Decode(&accounts); err != nil {
diff --git a/core/state/statedb.go b/core/state/statedb.go
index e80588507981..7aa6780cfa24 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -258,7 +258,7 @@ func (s *StateDB) GetLogs(hash common.Hash, blockNumber uint64, blockHash common
}
func (s *StateDB) Logs() []*types.Log {
- var logs []*types.Log
+ logs := make([]*types.Log, 0, s.logSize)
for _, lgs := range s.logs {
logs = append(logs, lgs...)
}
diff --git a/core/state/transient_storage.go b/core/state/transient_storage.go
index e63db39ebab6..3bb495542553 100644
--- a/core/state/transient_storage.go
+++ b/core/state/transient_storage.go
@@ -18,6 +18,7 @@ package state
import (
"fmt"
+ "maps"
"slices"
"strings"
@@ -70,19 +71,13 @@ func (t transientStorage) Copy() transientStorage {
// PrettyPrint prints the contents of the access list in a human-readable form
func (t transientStorage) PrettyPrint() string {
out := new(strings.Builder)
- var sortedAddrs []common.Address
- for addr := range t {
- sortedAddrs = append(sortedAddrs, addr)
- slices.SortFunc(sortedAddrs, common.Address.Cmp)
- }
+ sortedAddrs := slices.Collect(maps.Keys(t))
+ slices.SortFunc(sortedAddrs, common.Address.Cmp)
for _, addr := range sortedAddrs {
fmt.Fprintf(out, "%#x:", addr)
- var sortedKeys []common.Hash
storage := t[addr]
- for key := range storage {
- sortedKeys = append(sortedKeys, key)
- }
+ sortedKeys := slices.Collect(maps.Keys(storage))
slices.SortFunc(sortedKeys, common.Hash.Cmp)
for _, key := range sortedKeys {
fmt.Fprintf(out, " %X : %X\n", key, storage[key])
diff --git a/core/tracing/journal.go b/core/tracing/journal.go
index 8937d4c5ae22..a402f1ac0985 100644
--- a/core/tracing/journal.go
+++ b/core/tracing/journal.go
@@ -17,7 +17,7 @@
package tracing
import (
- "fmt"
+ "errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
@@ -39,14 +39,14 @@ type entry interface {
// WrapWithJournal wraps the given tracer with a journaling layer.
func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
if hooks == nil {
- return nil, fmt.Errorf("wrapping nil tracer")
+ return nil, errors.New("wrapping nil tracer")
}
// No state change to journal, return the wrapped hooks as is
if hooks.OnBalanceChange == nil && hooks.OnNonceChange == nil && hooks.OnNonceChangeV2 == nil && hooks.OnCodeChange == nil && hooks.OnStorageChange == nil {
return hooks, nil
}
if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil {
- return nil, fmt.Errorf("cannot have both OnNonceChange and OnNonceChangeV2")
+ return nil, errors.New("cannot have both OnNonceChange and OnNonceChangeV2")
}
// Create a new Hooks instance and copy all hooks
diff --git a/core/txpool/validation.go b/core/txpool/validation.go
index 80ba994d1a4a..d4f340108663 100644
--- a/core/txpool/validation.go
+++ b/core/txpool/validation.go
@@ -145,7 +145,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
}
if tx.Type() == types.SetCodeTxType {
if len(tx.SetCodeAuthorizations()) == 0 {
- return fmt.Errorf("set code tx must have at least one authorization tuple")
+ return errors.New("set code tx must have at least one authorization tuple")
}
}
return nil
diff --git a/core/types/bal/bal_encoding.go b/core/types/bal/bal_encoding.go
index d7d08801b111..24dfafa0831f 100644
--- a/core/types/bal/bal_encoding.go
+++ b/core/types/bal/bal_encoding.go
@@ -169,7 +169,7 @@ func (e *AccountAccess) validate() error {
// Convert code change
if len(e.Code) == 1 {
if len(e.Code[0].Code) > params.MaxCodeSize {
- return fmt.Errorf("code change contained oversized code")
+ return errors.New("code change contained oversized code")
}
}
return nil
diff --git a/core/types/tx_blob.go b/core/types/tx_blob.go
index 9dd76c7f9d1f..bbfd3c98db30 100644
--- a/core/types/tx_blob.go
+++ b/core/types/tx_blob.go
@@ -117,15 +117,16 @@ func (sc *BlobTxSidecar) ToV1() error {
return nil
}
if sc.Version == BlobSidecarVersion0 {
- sc.Proofs = make([]kzg4844.Proof, 0, len(sc.Blobs)*kzg4844.CellProofsPerBlob)
+ proofs := make([]kzg4844.Proof, 0, len(sc.Blobs)*kzg4844.CellProofsPerBlob)
for _, blob := range sc.Blobs {
cellProofs, err := kzg4844.ComputeCellProofs(&blob)
if err != nil {
return err
}
- sc.Proofs = append(sc.Proofs, cellProofs...)
+ proofs = append(proofs, cellProofs...)
}
sc.Version = BlobSidecarVersion1
+ sc.Proofs = proofs
}
return nil
}
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index b65dff602ca0..21307ff5ace7 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -515,7 +515,7 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) {
}
// enforce size cap for inputs
if c.eip7823 && max(baseLen, expLen, modLen) > 1024 {
- return nil, fmt.Errorf("one or more of base/exponent/modulus length exceeded 1024 bytes")
+ return nil, errors.New("one or more of base/exponent/modulus length exceeded 1024 bytes")
}
// Retrieve the operands and execute the exponentiation
var (
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 8077362ef765..b4fb75aa5da4 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -257,7 +257,7 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, pa
return engine.STATUS_INVALID, attributesErr("missing withdrawals")
case params.BeaconRoot == nil:
return engine.STATUS_INVALID, attributesErr("missing beacon root")
- case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague, forks.Osaka):
+ case !api.checkFork(params.Timestamp, forks.Cancun, forks.Prague):
return engine.STATUS_INVALID, unsupportedForkErr("fcuV3 must only be called for cancun or prague payloads")
}
}
@@ -693,7 +693,7 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas
return invalidStatus, paramsErr("nil beaconRoot post-cancun")
case executionRequests == nil:
return invalidStatus, paramsErr("nil executionRequests post-prague")
- case !api.checkFork(params.Timestamp, forks.Prague, forks.Osaka):
+ case !api.checkFork(params.Timestamp, forks.Prague):
return invalidStatus, unsupportedForkErr("newPayloadV4 must only be called for prague payloads")
}
requests := convertRequests(executionRequests)
diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go
index 6f7d3348abe3..d1fc66a94b44 100644
--- a/eth/catalyst/api_test.go
+++ b/eth/catalyst/api_test.go
@@ -1503,7 +1503,7 @@ func checkEqualBody(a *types.Body, b *engine.ExecutionPayloadBody) error {
}
}
if !reflect.DeepEqual(a.Withdrawals, b.Withdrawals) {
- return fmt.Errorf("withdrawals mismatch")
+ return errors.New("withdrawals mismatch")
}
return nil
}
diff --git a/eth/catalyst/tester.go b/eth/catalyst/tester.go
deleted file mode 100644
index 10a480837e25..000000000000
--- a/eth/catalyst/tester.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package catalyst
-
-import (
- "sync"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/eth"
- "github.com/ethereum/go-ethereum/eth/ethconfig"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/node"
-)
-
-// FullSyncTester is an auxiliary service that allows Geth to perform full sync
-// alone without consensus-layer attached. Users must specify a valid block hash
-// as the sync target.
-//
-// This tester can be applied to different networks, no matter it's pre-merge or
-// post-merge, but only for full-sync.
-type FullSyncTester struct {
- stack *node.Node
- backend *eth.Ethereum
- target common.Hash
- closed chan struct{}
- wg sync.WaitGroup
- exitWhenSynced bool
-}
-
-// RegisterFullSyncTester registers the full-sync tester service into the node
-// stack for launching and stopping the service controlled by node.
-func RegisterFullSyncTester(stack *node.Node, backend *eth.Ethereum, target common.Hash, exitWhenSynced bool) (*FullSyncTester, error) {
- cl := &FullSyncTester{
- stack: stack,
- backend: backend,
- target: target,
- closed: make(chan struct{}),
- exitWhenSynced: exitWhenSynced,
- }
- stack.RegisterLifecycle(cl)
- return cl, nil
-}
-
-// Start launches the beacon sync with provided sync target.
-func (tester *FullSyncTester) Start() error {
- tester.wg.Add(1)
- go func() {
- defer tester.wg.Done()
-
- // Trigger beacon sync with the provided block hash as trusted
- // chain head.
- err := tester.backend.Downloader().BeaconDevSync(ethconfig.FullSync, tester.target, tester.closed)
- if err != nil {
- log.Info("Failed to trigger beacon sync", "err", err)
- }
-
- ticker := time.NewTicker(time.Second * 5)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- // Stop in case the target block is already stored locally.
- if block := tester.backend.BlockChain().GetBlockByHash(tester.target); block != nil {
- log.Info("Full-sync target reached", "number", block.NumberU64(), "hash", block.Hash())
-
- if tester.exitWhenSynced {
- go tester.stack.Close() // async since we need to close ourselves
- log.Info("Terminating the node")
- }
- return
- }
-
- case <-tester.closed:
- return
- }
- }
- }()
- return nil
-}
-
-// Stop stops the full-sync tester to stop all background activities.
-// This function can only be called for one time.
-func (tester *FullSyncTester) Stop() error {
- close(tester.closed)
- tester.wg.Wait()
- return nil
-}
diff --git a/eth/catalyst/witness.go b/eth/catalyst/witness.go
index 4c5aedddbdaa..48e8c9ca2843 100644
--- a/eth/catalyst/witness.go
+++ b/eth/catalyst/witness.go
@@ -101,7 +101,7 @@ func (api *ConsensusAPI) ForkchoiceUpdatedWithWitnessV3P11(update engine.Forkcho
return engine.STATUS_INVALID, attributesErr("missing beacon root")
case params.ProposerPubkey == nil:
return engine.STATUS_INVALID, attributesErr("missing proposer pubkey")
- case !api.checkFork(params.Timestamp, forks.Prague1):
+ case !api.checkFork(params.Timestamp, forks.Prague1, forks.Osaka):
return engine.STATUS_INVALID, unsupportedForkErr("fcuV3P11 must only be called for prague1 payloads")
}
}
@@ -207,7 +207,7 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV4P11(params engine.ExecutableData
return invalidStatus, paramsErr("nil executionRequests post-prague")
case proposerPubkey == nil:
return invalidStatus, paramsErr("nil proposerPubkey post-prague1")
- case !api.checkFork(params.Timestamp, forks.Prague1):
+ case !api.checkFork(params.Timestamp, forks.Prague1, forks.Osaka):
return invalidStatus, unsupportedForkErr("newPayloadV4P11 must only be called for prague1 payloads")
}
requests := convertRequests(executionRequests)
@@ -312,7 +312,7 @@ func (api *ConsensusAPI) ExecuteStatelessPayloadV4P11(params engine.ExecutableDa
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, paramsErr("nil executionRequests post-prague")
case proposerPubkey == nil:
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, paramsErr("nil proposerPubkey post-prague1")
- case !api.checkFork(params.Timestamp, forks.Prague1):
+ case !api.checkFork(params.Timestamp, forks.Prague1, forks.Osaka):
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, unsupportedForkErr("executeStatelessPayloadV4P11 must only be called for prague1 payloads")
}
requests := convertRequests(executionRequests)
diff --git a/eth/downloader/beacondevsync.go b/eth/downloader/beacondevsync.go
index 0032eb53b966..7b3068413377 100644
--- a/eth/downloader/beacondevsync.go
+++ b/eth/downloader/beacondevsync.go
@@ -18,7 +18,6 @@ package downloader
import (
"errors"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -34,28 +33,14 @@ import (
// Note, this must not be used in live code. If the forkchcoice endpoint where
// to use this instead of giving us the payload first, then essentially nobody
// in the network would have the block yet that we'd attempt to retrieve.
-func (d *Downloader) BeaconDevSync(mode SyncMode, hash common.Hash, stop chan struct{}) error {
+func (d *Downloader) BeaconDevSync(mode SyncMode, header *types.Header) error {
// Be very loud that this code should not be used in a live node
log.Warn("----------------------------------")
- log.Warn("Beacon syncing with hash as target", "hash", hash)
+ log.Warn("Beacon syncing with hash as target", "number", header.Number, "hash", header.Hash())
log.Warn("This is unhealthy for a live node!")
+ log.Warn("This is incompatible with the consensus layer!")
log.Warn("----------------------------------")
-
- log.Info("Waiting for peers to retrieve sync target")
- for {
- // If the node is going down, unblock
- select {
- case <-stop:
- return errors.New("stop requested")
- default:
- }
- header, err := d.GetHeader(hash)
- if err != nil {
- time.Sleep(time.Second)
- continue
- }
- return d.BeaconSync(mode, header, header)
- }
+ return d.BeaconSync(mode, header, header)
}
// GetHeader tries to retrieve the header with a given hash from a random peer.
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index dcda4e521cbf..09837a304505 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -199,7 +199,7 @@ type BlockChain interface {
// InsertChain inserts a batch of blocks into the local chain.
InsertChain(types.Blocks) (int, error)
- // InterruptInsert whether disables the chain insertion.
+ // InterruptInsert disables or enables chain insertion.
InterruptInsert(on bool)
// InsertReceiptChain inserts a batch of blocks along with their receipts
@@ -513,7 +513,7 @@ func (d *Downloader) syncToHead() (err error) {
//
// For non-merged networks, if there is a checkpoint available, then calculate
// the ancientLimit through that. Otherwise calculate the ancient limit through
- // the advertised height of the remote peer. This most is mostly a fallback for
+ // the advertised height of the remote peer. This is mostly a fallback for
// legacy networks, but should eventually be dropped. TODO(karalabe).
//
// Beacon sync, use the latest finalized block as the ancient limit
@@ -946,7 +946,7 @@ func (d *Downloader) processSnapSyncContent() error {
if !d.committed.Load() {
latest := results[len(results)-1].Header
// If the height is above the pivot block by 2 sets, it means the pivot
- // become stale in the network, and it was garbage collected, move to a
+ // became stale in the network, and it was garbage collected, move to a
// new pivot.
//
// Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those
@@ -1043,7 +1043,7 @@ func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *state
first, last := results[0].Header, results[len(results)-1].Header
log.Debug("Inserting snap-sync blocks", "items", len(results),
"firstnum", first.Number, "firsthash", first.Hash(),
- "lastnumn", last.Number, "lasthash", last.Hash(),
+ "lastnum", last.Number, "lasthash", last.Hash(),
)
blocks := make([]*types.Block, len(results))
receipts := make([]rlp.RawValue, len(results))
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 669ce003cfe3..c1a31d6e1c2f 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -544,7 +544,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {
tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:])
if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil {
- t.Fatalf("failed to start beacon sync: #{err}")
+ t.Fatalf("failed to start beacon sync: %v", err)
}
select {
case <-complete:
diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go
index 4ebb9bbc98a4..6e5c65eb2079 100644
--- a/eth/downloader/fetchers.go
+++ b/eth/downloader/fetchers.go
@@ -45,9 +45,6 @@ func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amo
defer timeoutTimer.Stop()
select {
- case <-d.cancelCh:
- return nil, nil, errCanceled
-
case <-timeoutTimer.C:
// Header retrieval timed out, update the metrics
p.log.Debug("Header request timed out", "elapsed", ttl)
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 368bda7f8459..99aae4888295 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -18,7 +18,7 @@
package ethconfig
import (
- "fmt"
+ "errors"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -171,7 +171,7 @@ type Config struct {
func CreateConsensusEngine(config *params.ChainConfig, db ethdb.Database) (consensus.Engine, error) {
if config.TerminalTotalDifficulty == nil {
log.Error("Geth only supports PoS networks. Please transition legacy networks using Geth v1.13.x.")
- return nil, fmt.Errorf("'terminalTotalDifficulty' is not set in genesis block")
+ return nil, errors.New("'terminalTotalDifficulty' is not set in genesis block")
}
// Wrap previously supported consensus engines into their post-merge counterpart
if config.Clique != nil {
diff --git a/eth/gasestimator/gasestimator.go b/eth/gasestimator/gasestimator.go
index 98a4f74b3e4c..7e9d8125de8c 100644
--- a/eth/gasestimator/gasestimator.go
+++ b/eth/gasestimator/gasestimator.go
@@ -170,7 +170,7 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin
break
}
}
- mid := (hi + lo) / 2
+ mid := lo + (hi-lo)/2
if mid > lo*2 {
// Most txs don't need much higher gas limit than their gas used, and most txs don't
// require near the full block limit of gas, so the selection of where to bisect the
diff --git a/eth/protocols/snap/metrics.go b/eth/protocols/snap/metrics.go
index 6878e5b28058..6319a9b75dee 100644
--- a/eth/protocols/snap/metrics.go
+++ b/eth/protocols/snap/metrics.go
@@ -66,4 +66,7 @@ var (
// discarded during the snap sync.
largeStorageDiscardGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/chunk/discard", nil)
largeStorageResumedGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/chunk/resume", nil)
+
+ stateSyncTimeGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/time/statesync", nil)
+ stateHealTimeGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/time/stateheal", nil)
)
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 84ceb9105eae..cf4e49464530 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -502,8 +502,10 @@ type Syncer struct {
storageHealed uint64 // Number of storage slots downloaded during the healing stage
storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
- startTime time.Time // Time instance when snapshot sync started
- logTime time.Time // Time instance when status was last reported
+ startTime time.Time // Time instance when snapshot sync started
+ healStartTime time.Time // Time instance when the state healing started
+ syncTimeOnce sync.Once // Ensure that the state sync time is uploaded only once
+ logTime time.Time // Time instance when status was last reported
pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
lock sync.RWMutex // Protects fields that can change outside of sync (peers, reqs, root)
@@ -685,6 +687,14 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.cleanStorageTasks()
s.cleanAccountTasks()
if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {
+ // State healing phase completed, record the elapsed time in metrics.
+ // Note: healing may be rerun in subsequent cycles to fill gaps between
+ // pivot states (e.g., if chain sync takes longer).
+ if !s.healStartTime.IsZero() {
+ stateHealTimeGauge.Inc(int64(time.Since(s.healStartTime)))
+ log.Info("State healing phase is completed", "elapsed", common.PrettyDuration(time.Since(s.healStartTime)))
+ s.healStartTime = time.Time{}
+ }
return nil
}
// Assign all the data retrieval tasks to any free peers
@@ -693,7 +703,17 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.assignStorageTasks(storageResps, storageReqFails, cancel)
if len(s.tasks) == 0 {
- // Sync phase done, run heal phase
+ // State sync phase completed, record the elapsed time in metrics.
+ // Note: the initial state sync runs only once, regardless of whether
+ // a new cycle is started later. Any state differences in subsequent
+ // cycles will be handled by the state healer.
+ s.syncTimeOnce.Do(func() {
+ stateSyncTimeGauge.Update(int64(time.Since(s.startTime)))
+ log.Info("State sync phase is completed", "elapsed", common.PrettyDuration(time.Since(s.startTime)))
+ })
+ if s.healStartTime.IsZero() {
+ s.healStartTime = time.Now()
+ }
s.assignTrienodeHealTasks(trienodeHealResps, trienodeHealReqFails, cancel)
s.assignBytecodeHealTasks(bytecodeHealResps, bytecodeHealReqFails, cancel)
}
diff --git a/eth/syncer/syncer.go b/eth/syncer/syncer.go
new file mode 100644
index 000000000000..5c4d2401e9f4
--- /dev/null
+++ b/eth/syncer/syncer.go
@@ -0,0 +1,197 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package syncer
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/eth"
+ "github.com/ethereum/go-ethereum/eth/ethconfig"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+type syncReq struct {
+ hash common.Hash
+ errc chan error
+}
+
+// Syncer is an auxiliary service that allows Geth to perform full sync
+// alone without a consensus layer attached. Users must specify a valid block hash
+// as the sync target.
+//
+// This tool can be applied to different networks, no matter whether it's pre-merge
+// or post-merge, but only for full-sync.
+type Syncer struct {
+ stack *node.Node
+ backend *eth.Ethereum
+ target common.Hash
+ request chan *syncReq
+ closed chan struct{}
+ wg sync.WaitGroup
+ exitWhenSynced bool
+}
+
+// Register registers the synchronization override service into the node
+// stack for launching and stopping the service controlled by node.
+func Register(stack *node.Node, backend *eth.Ethereum, target common.Hash, exitWhenSynced bool) (*Syncer, error) {
+ s := &Syncer{
+ stack: stack,
+ backend: backend,
+ target: target,
+ request: make(chan *syncReq),
+ closed: make(chan struct{}),
+ exitWhenSynced: exitWhenSynced,
+ }
+ stack.RegisterAPIs(s.APIs())
+ stack.RegisterLifecycle(s)
+ return s, nil
+}
+
+// APIs returns the collection of RPC services the syncer service offers.
+// NOTE, some of these services probably need to be moved to somewhere else.
+func (s *Syncer) APIs() []rpc.API {
+ return []rpc.API{
+ {
+ Namespace: "debug",
+ Service: NewAPI(s),
+ },
+ }
+}
+
+// run is the main loop that monitors sync requests from users and initiates
+// sync operations when necessary. It also checks whether the specified target
+// has been reached and shuts down Geth if requested by the user.
+func (s *Syncer) run() {
+ defer s.wg.Done()
+
+ var (
+ target *types.Header
+ ticker = time.NewTicker(time.Second * 5)
+ )
+ for {
+ select {
+ case req := <-s.request:
+ var (
+ resync bool
+ retries int
+ logged bool
+ )
+ for {
+ if retries >= 10 {
+ req.errc <- fmt.Errorf("sync target is not available, %x", req.hash)
+ break
+ }
+ select {
+ case <-s.closed:
+ req.errc <- errors.New("syncer closed")
+ return
+ default:
+ }
+
+ header, err := s.backend.Downloader().GetHeader(req.hash)
+ if err != nil {
+ if !logged {
+ logged = true
+ log.Info("Waiting for peers to retrieve sync target", "hash", req.hash)
+ }
+ time.Sleep(time.Second * time.Duration(retries+1))
+ retries++
+ continue
+ }
+ if target != nil && header.Number.Cmp(target.Number) <= 0 {
+ req.errc <- fmt.Errorf("stale sync target, current: %d, received: %d", target.Number, header.Number)
+ break
+ }
+ target = header
+ resync = true
+ break
+ }
+ if resync {
+ req.errc <- s.backend.Downloader().BeaconDevSync(ethconfig.FullSync, target)
+ }
+
+ case <-ticker.C:
+ if target == nil || !s.exitWhenSynced {
+ continue
+ }
+ if block := s.backend.BlockChain().GetBlockByHash(target.Hash()); block != nil {
+ log.Info("Sync target reached", "number", block.NumberU64(), "hash", block.Hash())
+ go s.stack.Close() // async since we need to close ourselves
+ return
+ }
+
+ case <-s.closed:
+ return
+ }
+ }
+}
+
+// Start launches the synchronization service.
+func (s *Syncer) Start() error {
+ s.wg.Add(1)
+ go s.run()
+ if s.target == (common.Hash{}) {
+ return nil
+ }
+ return s.Sync(s.target)
+}
+
+// Stop terminates the synchronization service and stops all background activities.
+// This function can only be called once.
+func (s *Syncer) Stop() error {
+ close(s.closed)
+ s.wg.Wait()
+ return nil
+}
+
+// Sync sets the synchronization target. Notably, setting a target lower than the
+// previous one is not allowed, as backward synchronization is not supported.
+func (s *Syncer) Sync(hash common.Hash) error {
+ req := &syncReq{
+ hash: hash,
+ errc: make(chan error, 1),
+ }
+ select {
+ case s.request <- req:
+ return <-req.errc
+ case <-s.closed:
+ return errors.New("syncer is closed")
+ }
+}
+
+// API is the collection of synchronization service APIs for debugging the
+// protocol.
+type API struct {
+ s *Syncer
+}
+
+// NewAPI creates a new debug API instance.
+func NewAPI(s *Syncer) *API {
+ return &API{s: s}
+}
+
+// Sync initiates a full sync to the target block hash.
+func (api *API) Sync(target common.Hash) error {
+ return api.s.Sync(target)
+}
diff --git a/ethclient/simulated/backend_test.go b/ethclient/simulated/backend_test.go
index 7a399d41f3d0..ee20cd171abd 100644
--- a/ethclient/simulated/backend_test.go
+++ b/ethclient/simulated/backend_test.go
@@ -52,7 +52,7 @@ func simTestBackend(testAddr common.Address) *Backend {
)
}
-func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
+func newBlobTx(sim *Backend, key *ecdsa.PrivateKey, nonce uint64) (*types.Transaction, error) {
client := sim.Client()
testBlob := &kzg4844.Blob{0x00}
@@ -67,12 +67,8 @@ func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error)
addr := crypto.PubkeyToAddress(key.PublicKey)
chainid, _ := client.ChainID(context.Background())
- nonce, err := client.PendingNonceAt(context.Background(), addr)
- if err != nil {
- return nil, err
- }
-
chainidU256, _ := uint256.FromBig(chainid)
+
tx := types.NewTx(&types.BlobTx{
ChainID: chainidU256,
GasTipCap: gasTipCapU256,
@@ -88,7 +84,7 @@ func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error)
return types.SignTx(tx, types.LatestSignerForChainID(chainid), key)
}
-func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
+func newTx(sim *Backend, key *ecdsa.PrivateKey, nonce uint64) (*types.Transaction, error) {
client := sim.Client()
// create a signed transaction to send
@@ -96,10 +92,7 @@ func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))
addr := crypto.PubkeyToAddress(key.PublicKey)
chainid, _ := client.ChainID(context.Background())
- nonce, err := client.PendingNonceAt(context.Background(), addr)
- if err != nil {
- return nil, err
- }
+
tx := types.NewTx(&types.DynamicFeeTx{
ChainID: chainid,
Nonce: nonce,
@@ -161,7 +154,7 @@ func TestSendTransaction(t *testing.T) {
client := sim.Client()
ctx := context.Background()
- signedTx, err := newTx(sim, testKey)
+ signedTx, err := newTx(sim, testKey, 0)
if err != nil {
t.Errorf("could not create transaction: %v", err)
}
@@ -252,7 +245,7 @@ func TestForkResendTx(t *testing.T) {
parent, _ := client.HeaderByNumber(ctx, nil)
// 2.
- tx, err := newTx(sim, testKey)
+ tx, err := newTx(sim, testKey, 0)
if err != nil {
t.Fatalf("could not create transaction: %v", err)
}
@@ -297,7 +290,7 @@ func TestCommitReturnValue(t *testing.T) {
}
// Create a block in the original chain (containing a transaction to force different block hashes)
- tx, _ := newTx(sim, testKey)
+ tx, _ := newTx(sim, testKey, 0)
if err := client.SendTransaction(ctx, tx); err != nil {
t.Errorf("sending transaction: %v", err)
}
diff --git a/ethclient/simulated/rollback_test.go b/ethclient/simulated/rollback_test.go
index 57c59496d5b1..093467d2910e 100644
--- a/ethclient/simulated/rollback_test.go
+++ b/ethclient/simulated/rollback_test.go
@@ -38,9 +38,9 @@ func TestTransactionRollbackBehavior(t *testing.T) {
defer sim.Close()
client := sim.Client()
- btx0 := testSendSignedTx(t, testKey, sim, true)
- tx0 := testSendSignedTx(t, testKey2, sim, false)
- tx1 := testSendSignedTx(t, testKey2, sim, false)
+ btx0 := testSendSignedTx(t, testKey, sim, true, 0)
+ tx0 := testSendSignedTx(t, testKey2, sim, false, 0)
+ tx1 := testSendSignedTx(t, testKey2, sim, false, 1)
sim.Rollback()
@@ -48,9 +48,9 @@ func TestTransactionRollbackBehavior(t *testing.T) {
t.Fatalf("all transactions were not rolled back")
}
- btx2 := testSendSignedTx(t, testKey, sim, true)
- tx2 := testSendSignedTx(t, testKey2, sim, false)
- tx3 := testSendSignedTx(t, testKey2, sim, false)
+ btx2 := testSendSignedTx(t, testKey, sim, true, 0)
+ tx2 := testSendSignedTx(t, testKey2, sim, false, 0)
+ tx3 := testSendSignedTx(t, testKey2, sim, false, 1)
sim.Commit()
@@ -61,7 +61,7 @@ func TestTransactionRollbackBehavior(t *testing.T) {
// testSendSignedTx sends a signed transaction to the simulated backend.
// It does not commit the block.
-func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobTx bool) *types.Transaction {
+func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobTx bool, nonce uint64) *types.Transaction {
t.Helper()
client := sim.Client()
ctx := context.Background()
@@ -71,9 +71,9 @@ func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobT
signedTx *types.Transaction
)
if isBlobTx {
- signedTx, err = newBlobTx(sim, key)
+ signedTx, err = newBlobTx(sim, key, nonce)
} else {
- signedTx, err = newTx(sim, key)
+ signedTx, err = newTx(sim, key, nonce)
}
if err != nil {
t.Fatalf("failed to create transaction: %v", err)
@@ -96,13 +96,13 @@ func pendingStateHasTx(client Client, tx *types.Transaction) bool {
)
// Poll for receipt with timeout
- deadline := time.Now().Add(2 * time.Second)
+ deadline := time.Now().Add(200 * time.Millisecond)
for time.Now().Before(deadline) {
receipt, err = client.TransactionReceipt(ctx, tx.Hash())
if err == nil && receipt != nil {
break
}
- time.Sleep(100 * time.Millisecond)
+ time.Sleep(5 * time.Millisecond)
}
if err != nil {
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index 736a44d73d62..8e1bb86fec4e 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -22,6 +22,7 @@ package leveldb
import (
"bytes"
+ "errors"
"fmt"
"sync"
"time"
@@ -31,7 +32,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/syndtr/goleveldb/leveldb"
- "github.com/syndtr/goleveldb/leveldb/errors"
+ lerrors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
@@ -120,7 +121,7 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option
// Open the db and recover any potential corruptions
db, err := leveldb.OpenFile(file, options)
- if _, corrupted := err.(*errors.ErrCorrupted); corrupted {
+ if _, corrupted := err.(*lerrors.ErrCorrupted); corrupted {
db, err = leveldb.RecoverFile(file, nil)
}
if err != nil {
@@ -548,7 +549,7 @@ func (r *replayer) DeleteRange(start, end []byte) {
if rangeDeleter, ok := r.writer.(ethdb.KeyValueRangeDeleter); ok {
r.failure = rangeDeleter.DeleteRange(start, end)
} else {
- r.failure = fmt.Errorf("ethdb.KeyValueWriter does not implement DeleteRange")
+ r.failure = errors.New("ethdb.KeyValueWriter does not implement DeleteRange")
}
}
diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go
index 5c4c48de6490..200ad6024568 100644
--- a/ethdb/memorydb/memorydb.go
+++ b/ethdb/memorydb/memorydb.go
@@ -20,7 +20,6 @@ package memorydb
import (
"bytes"
"errors"
- "fmt"
"sort"
"strings"
"sync"
@@ -327,7 +326,7 @@ func (b *batch) Replay(w ethdb.KeyValueWriter) error {
return err
}
} else {
- return fmt.Errorf("ethdb.KeyValueWriter does not implement DeleteRange")
+ return errors.New("ethdb.KeyValueWriter does not implement DeleteRange")
}
}
continue
diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go
index 58a521f6fb6b..2370d4654f34 100644
--- a/ethdb/pebble/pebble.go
+++ b/ethdb/pebble/pebble.go
@@ -18,6 +18,7 @@
package pebble
import (
+ "errors"
"fmt"
"runtime"
"strings"
@@ -705,7 +706,7 @@ func (b *batch) Replay(w ethdb.KeyValueWriter) error {
return err
}
} else {
- return fmt.Errorf("ethdb.KeyValueWriter does not implement DeleteRange")
+ return errors.New("ethdb.KeyValueWriter does not implement DeleteRange")
}
} else {
return fmt.Errorf("unhandled operation, keytype: %v", kind)
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index a6d93fc1c531..d7f37a79eea4 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -468,6 +468,11 @@ web3._extend({
call: 'debug_getTrieFlushInterval',
params: 0
}),
+ new web3._extend.Method({
+ name: 'sync',
+ call: 'debug_sync',
+ params: 1
+ }),
],
properties: []
});
diff --git a/p2p/rlpx/rlpx.go b/p2p/rlpx/rlpx.go
index dd14822dee76..c074534d4dec 100644
--- a/p2p/rlpx/rlpx.go
+++ b/p2p/rlpx/rlpx.go
@@ -33,6 +33,7 @@ import (
"net"
"time"
+ "github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/rlp"
@@ -676,8 +677,6 @@ func exportPubkey(pub *ecies.PublicKey) []byte {
func xor(one, other []byte) (xor []byte) {
xor = make([]byte, len(one))
- for i := 0; i < len(one); i++ {
- xor[i] = one[i] ^ other[i]
- }
+ bitutil.XORBytes(xor, one, other)
return xor
}
diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go
index 66c750a9c30a..b5fd5a285401 100644
--- a/signer/core/apitypes/types.go
+++ b/signer/core/apitypes/types.go
@@ -151,7 +151,7 @@ func (args *SendTxArgs) ToTransaction() (*types.Transaction, error) {
al = *args.AccessList
}
if to == nil {
- return nil, fmt.Errorf("transaction recipient must be set for blob transactions")
+ return nil, errors.New("transaction recipient must be set for blob transactions")
}
data = &types.BlobTx{
To: *to,
diff --git a/tests/transaction_test_util.go b/tests/transaction_test_util.go
index b2efabe82e01..a90c2d522f64 100644
--- a/tests/transaction_test_util.go
+++ b/tests/transaction_test_util.go
@@ -17,6 +17,7 @@
package tests
import (
+ "errors"
"fmt"
"math/big"
@@ -43,7 +44,7 @@ type ttFork struct {
func (tt *TransactionTest) validate() error {
if tt.Txbytes == nil {
- return fmt.Errorf("missing txbytes")
+ return errors.New("missing txbytes")
}
for name, fork := range tt.Result {
if err := tt.validateFork(fork); err != nil {
@@ -58,10 +59,10 @@ func (tt *TransactionTest) validateFork(fork *ttFork) error {
return nil
}
if fork.Hash == nil && fork.Exception == nil {
- return fmt.Errorf("missing hash and exception")
+ return errors.New("missing hash and exception")
}
if fork.Hash != nil && fork.Sender == nil {
- return fmt.Errorf("missing sender")
+ return errors.New("missing sender")
}
return nil
}
diff --git a/triedb/pathdb/history_index.go b/triedb/pathdb/history_index.go
index f79581b38b19..e781a898e1a4 100644
--- a/triedb/pathdb/history_index.go
+++ b/triedb/pathdb/history_index.go
@@ -353,7 +353,7 @@ func (d *indexDeleter) empty() bool {
// pop removes the last written element from the index writer.
func (d *indexDeleter) pop(id uint64) error {
if id == 0 {
- return fmt.Errorf("zero history ID is not valid")
+ return errors.New("zero history ID is not valid")
}
if id != d.lastID {
return fmt.Errorf("pop element out of order, last: %d, this: %d", d.lastID, id)
diff --git a/triedb/pathdb/history_index_block.go b/triedb/pathdb/history_index_block.go
index 10cc88ed4ed2..7648b99226c9 100644
--- a/triedb/pathdb/history_index_block.go
+++ b/triedb/pathdb/history_index_block.go
@@ -221,17 +221,14 @@ func (br *blockReader) readGreaterThan(id uint64) (uint64, error) {
type blockWriter struct {
desc *indexBlockDesc // Descriptor of the block
restarts []uint16 // Offsets into the data slice, marking the start of each section
- scratch []byte // Buffer used for encoding full integers or value differences
data []byte // Aggregated encoded data slice
}
func newBlockWriter(blob []byte, desc *indexBlockDesc) (*blockWriter, error) {
- scratch := make([]byte, binary.MaxVarintLen64)
if len(blob) == 0 {
return &blockWriter{
- desc: desc,
- scratch: scratch,
- data: make([]byte, 0, 1024),
+ desc: desc,
+ data: make([]byte, 0, 1024),
}, nil
}
restarts, data, err := parseIndexBlock(blob)
@@ -241,7 +238,6 @@ func newBlockWriter(blob []byte, desc *indexBlockDesc) (*blockWriter, error) {
return &blockWriter{
desc: desc,
restarts: restarts,
- scratch: scratch,
data: data, // safe to own the slice
}, nil
}
@@ -268,22 +264,14 @@ func (b *blockWriter) append(id uint64) error {
//
// The first element in a restart range is encoded using its
// full value.
- n := binary.PutUvarint(b.scratch[0:], id)
- b.data = append(b.data, b.scratch[:n]...)
+ b.data = binary.AppendUvarint(b.data, id)
} else {
- // The current section is not full, append the element.
// The element which is not the first one in the section
// is encoded using the value difference from the preceding
// element.
- n := binary.PutUvarint(b.scratch[0:], id-b.desc.max)
- b.data = append(b.data, b.scratch[:n]...)
+ b.data = binary.AppendUvarint(b.data, id-b.desc.max)
}
b.desc.entries++
-
- // The state history ID must be greater than 0.
- //if b.desc.min == 0 {
- // b.desc.min = id
- //}
b.desc.max = id
return nil
}
@@ -392,11 +380,10 @@ func (b *blockWriter) full() bool {
//
// This function is safe to be called multiple times.
func (b *blockWriter) finish() []byte {
- var buf []byte
- for _, number := range b.restarts {
- binary.BigEndian.PutUint16(b.scratch[:2], number)
- buf = append(buf, b.scratch[:2]...)
+ buf := make([]byte, len(b.restarts)*2+1)
+ for i, restart := range b.restarts {
+ binary.BigEndian.PutUint16(buf[2*i:], restart)
}
- buf = append(buf, byte(len(b.restarts)))
+ buf[len(buf)-1] = byte(len(b.restarts))
return append(b.data, buf...)
}
diff --git a/triedb/pathdb/history_index_block_test.go b/triedb/pathdb/history_index_block_test.go
index 7b0e362c6691..c251cea2ecb9 100644
--- a/triedb/pathdb/history_index_block_test.go
+++ b/triedb/pathdb/history_index_block_test.go
@@ -232,3 +232,22 @@ func BenchmarkParseIndexBlock(b *testing.B) {
}
}
}
+
+// BenchmarkBlockWriterAppend benchmarks the performance of indexblock.writer
+func BenchmarkBlockWriterAppend(b *testing.B) {
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ desc := newIndexBlockDesc(0)
+ writer, _ := newBlockWriter(nil, desc)
+
+ for i := 0; i < b.N; i++ {
+ if writer.full() {
+ desc = newIndexBlockDesc(0)
+ writer, _ = newBlockWriter(nil, desc)
+ }
+ if err := writer.append(writer.desc.max + 1); err != nil {
+ b.Error(err)
+ }
+ }
+}
diff --git a/triedb/pathdb/history_indexer.go b/triedb/pathdb/history_indexer.go
index 42103fab32c9..054d43e946d3 100644
--- a/triedb/pathdb/history_indexer.go
+++ b/triedb/pathdb/history_indexer.go
@@ -392,16 +392,17 @@ func (i *indexIniter) run(lastID uint64) {
select {
case signal := <-i.interrupt:
// The indexing limit can only be extended or shortened continuously.
- if signal.newLastID != lastID+1 && signal.newLastID != lastID-1 {
- signal.result <- fmt.Errorf("invalid history id, last: %d, got: %d", lastID, signal.newLastID)
+ newLastID := signal.newLastID
+ if newLastID != lastID+1 && newLastID != lastID-1 {
+ signal.result <- fmt.Errorf("invalid history id, last: %d, got: %d", lastID, newLastID)
continue
}
- i.last.Store(signal.newLastID) // update indexing range
+ i.last.Store(newLastID) // update indexing range
// The index limit is extended by one, update the limit without
// interrupting the current background process.
- if signal.newLastID == lastID+1 {
- lastID = signal.newLastID
+ if newLastID == lastID+1 {
+ lastID = newLastID
signal.result <- nil
log.Debug("Extended state history range", "last", lastID)
continue
@@ -425,7 +426,9 @@ func (i *indexIniter) run(lastID uint64) {
return
}
// Adjust the indexing target and relaunch the process
- lastID = signal.newLastID
+ lastID = newLastID
+ signal.result <- nil
+
done, interrupt = make(chan struct{}), new(atomic.Int32)
go i.index(done, interrupt, lastID)
log.Debug("Shortened state history range", "last", lastID)
diff --git a/triedb/pathdb/history_indexer_test.go b/triedb/pathdb/history_indexer_test.go
new file mode 100644
index 000000000000..abfcafc94545
--- /dev/null
+++ b/triedb/pathdb/history_indexer_test.go
@@ -0,0 +1,57 @@
+// Copyright 2025 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/core/rawdb"
+)
+
+// TestHistoryIndexerShortenDeadlock tests that a call to shorten does not
+// deadlock when the indexer is active. This specifically targets the case where
+// signal.result must be sent to unblock the caller.
+func TestHistoryIndexerShortenDeadlock(t *testing.T) {
+ //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
+ db := rawdb.NewMemoryDatabase()
+ freezer, _ := rawdb.NewStateFreezer(t.TempDir(), false, false)
+ defer freezer.Close()
+
+ histories := makeHistories(100)
+ for i, h := range histories {
+ accountData, storageData, accountIndex, storageIndex := h.encode()
+ rawdb.WriteStateHistory(freezer, uint64(i+1), h.meta.encode(), accountIndex, storageIndex, accountData, storageData)
+ }
+ // As a workaround, assign a future block to keep the initer running indefinitely
+ indexer := newHistoryIndexer(db, freezer, 200)
+ defer indexer.close()
+
+ done := make(chan error, 1)
+ go func() {
+ done <- indexer.shorten(200)
+ }()
+
+ select {
+ case err := <-done:
+ if err != nil {
+ t.Fatalf("shorten returned an unexpected error: %v", err)
+ }
+ case <-time.After(2 * time.Second):
+ t.Fatal("timed out waiting for shorten to complete, potential deadlock")
+ }
+}